diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6fa7bb149605..5d308c1c455d 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,20 @@
 # Minikube Release Notes
 
+# Version 0.26.0 - 4/3/2018
+* Update to Kubernetes 1.10 [#2657](https://github.com/kubernetes/minikube/pull/2657)
+* Update Nginx Ingress Plugin to 0.12.0 [#2644](https://github.com/kubernetes/minikube/pull/2644)
+* [Minikube ISO] Add SSHFS Support to the Minikube ISO [#2600](https://github.com/kubernetes/minikube/pull/2600)
+* Upgrade Docker to 17.12 [#2597](https://github.com/kubernetes/minikube/pull/2597)
+* Deactivate HSTS in Ingress by default [#2591](https://github.com/kubernetes/minikube/pull/2591)
+* Add ValidatingAdmissionWebhook admission controller [#2590](https://github.com/kubernetes/minikube/pull/2590)
+* Upgrade docker-machine to fix Hyper-v name conflict [#2586](https://github.com/kubernetes/minikube/pull/2586)
+* Upgrade Core DNS Addon to 1.0.6 [#2584](https://github.com/kubernetes/minikube/pull/2584)
+* Add metrics server Addon [#2566](https://github.com/kubernetes/minikube/pull/2566)
+* Allow nesting in KVM driver [#2555](https://github.com/kubernetes/minikube/pull/2555)
+* Add MutatingAdmissionWebhook admission controller [#2547](https://github.com/kubernetes/minikube/pull/2547)
+* [Minikube ISO] Add Netfilter module to the ISO for Calico [#2490](https://github.com/kubernetes/minikube/pull/2490)
+* Add memory and request limit to EFK Addon [#2465](https://github.com/kubernetes/minikube/pull/2465)
+
 # Version 0.25.0 - 1/26/2018
 * Add freshpod addon [#2423](https://github.com/kubernetes/minikube/pull/2423)
 * List addons in consistent sort order [#2446](https://github.com/kubernetes/minikube/pull/2446)
diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json
index 82ff8628b778..46efb86b8e4e 100644
--- a/Godeps/Godeps.json
+++ b/Godeps/Godeps.json
@@ -27,33 +27,38 @@
 		},
 		{
 			"ImportPath": "github.com/Azure/azure-sdk-for-go/arm/compute",
-			"Comment": "v11.1.1-beta",
-			"Rev": "509eea43b93cec2f3f17acbe2578ef58703923f8"
+			"Comment": "v12.4.0-beta",
+			"Rev": "f111fc2fa3861c5fdced76cae4c9c71821969577"
 		},
 		{
 			"ImportPath": "github.com/Azure/azure-sdk-for-go/arm/containerregistry",
-			"Comment": "v11.1.1-beta",
-			"Rev": "509eea43b93cec2f3f17acbe2578ef58703923f8"
+			"Comment": "v12.4.0-beta",
+			"Rev": "f111fc2fa3861c5fdced76cae4c9c71821969577"
 		},
 		{
 			"ImportPath": "github.com/Azure/azure-sdk-for-go/arm/disk",
-			"Comment": "v11.1.1-beta",
-			"Rev": "509eea43b93cec2f3f17acbe2578ef58703923f8"
+			"Comment": "v12.4.0-beta",
+			"Rev": "f111fc2fa3861c5fdced76cae4c9c71821969577"
 		},
 		{
 			"ImportPath": "github.com/Azure/azure-sdk-for-go/arm/network",
-			"Comment": "v11.1.1-beta",
-			"Rev": "509eea43b93cec2f3f17acbe2578ef58703923f8"
+			"Comment": "v12.4.0-beta",
+			"Rev": "f111fc2fa3861c5fdced76cae4c9c71821969577"
 		},
 		{
 			"ImportPath": "github.com/Azure/azure-sdk-for-go/arm/storage",
-			"Comment": "v11.1.1-beta",
-			"Rev": "509eea43b93cec2f3f17acbe2578ef58703923f8"
+			"Comment": "v12.4.0-beta",
+			"Rev": "f111fc2fa3861c5fdced76cae4c9c71821969577"
+		},
+		{
+			"ImportPath": "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute",
+			"Comment": "v12.4.0-beta",
+			"Rev": "f111fc2fa3861c5fdced76cae4c9c71821969577"
 		},
 		{
 			"ImportPath": "github.com/Azure/azure-sdk-for-go/storage",
-			"Comment": "v11.1.1-beta",
-			"Rev": "509eea43b93cec2f3f17acbe2578ef58703923f8"
+			"Comment": "v12.4.0-beta",
+			"Rev": "f111fc2fa3861c5fdced76cae4c9c71821969577"
 		},
 		{
 			"ImportPath": "github.com/Azure/go-ansiterm",
@@ -65,33 +70,33 @@
 		},
 		{
 			"ImportPath":
"github.com/Azure/go-autorest/autorest", - "Comment": "v9.1.0", - "Rev": "e14a70c556c8e0db173358d1a903dca345a8e75e" + "Comment": "v9.9.0", + "Rev": "d4e6b95c12a08b4de2d48b45d5b4d594e5d32fab" }, { "ImportPath": "github.com/Azure/go-autorest/autorest/adal", - "Comment": "v9.1.0", - "Rev": "e14a70c556c8e0db173358d1a903dca345a8e75e" + "Comment": "v9.9.0", + "Rev": "d4e6b95c12a08b4de2d48b45d5b4d594e5d32fab" }, { "ImportPath": "github.com/Azure/go-autorest/autorest/azure", - "Comment": "v9.1.0", - "Rev": "e14a70c556c8e0db173358d1a903dca345a8e75e" + "Comment": "v9.9.0", + "Rev": "d4e6b95c12a08b4de2d48b45d5b4d594e5d32fab" }, { "ImportPath": "github.com/Azure/go-autorest/autorest/date", - "Comment": "v9.1.0", - "Rev": "e14a70c556c8e0db173358d1a903dca345a8e75e" + "Comment": "v9.9.0", + "Rev": "d4e6b95c12a08b4de2d48b45d5b4d594e5d32fab" }, { "ImportPath": "github.com/Azure/go-autorest/autorest/to", - "Comment": "v9.1.0", - "Rev": "e14a70c556c8e0db173358d1a903dca345a8e75e" + "Comment": "v9.9.0", + "Rev": "d4e6b95c12a08b4de2d48b45d5b4d594e5d32fab" }, { "ImportPath": "github.com/Azure/go-autorest/autorest/validation", - "Comment": "v9.1.0", - "Rev": "e14a70c556c8e0db173358d1a903dca345a8e75e" + "Comment": "v9.9.0", + "Rev": "d4e6b95c12a08b4de2d48b45d5b4d594e5d32fab" }, { "ImportPath": "github.com/JeffAshton/win_pdh", @@ -330,11 +335,6 @@ "Comment": "v3.5.0", "Rev": "b38d23b8782a487059e8fc8773e9a5b228a77cb6" }, - { - "ImportPath": "github.com/boltdb/bolt", - "Comment": "v1.3.0", - "Rev": "583e8937c61f1af6513608ccc75c97b6abdf4ff9" - }, { "ImportPath": "github.com/c4milo/gotoolkit", "Rev": "bcc06269efa974c4f098619d9aae436846e83d84" @@ -410,7 +410,7 @@ }, { "ImportPath": "github.com/cockroachdb/cmux", - "Rev": "30d10be492927e2dcae0089c374c455d42414fcb" + "Rev": "112f0506e7743d64a6eb8fedbcff13d9979bbf92" }, { "ImportPath": "github.com/codedellemc/goscaleio", @@ -421,8 +421,13 @@ "Rev": "20e2ce2cf8852dc78bd42b76698dcd8dcd77b7b1" }, { - "ImportPath": "github.com/container-storage-interface/spec/lib/go/csi", - "Rev": "ec298903f94e1d6d954de121b28044a2e1fdbf48" + "ImportPath": "github.com/container-storage-interface/spec/lib/go/csi/v0", + "Comment": "v0.2.0", + "Rev": "35d9f9d77954980e449e52c3f3e43c21bd8171f5" + }, + { + "ImportPath": "github.com/containerd/console", + "Rev": "84eeaae905fa414d03e07bcd6c8d3f19e7cf180e" }, { "ImportPath": "github.com/containerd/containerd/api/services/containers/v1", @@ -563,299 +568,369 @@ "ImportPath": "github.com/containers/image/version", "Rev": "ba72d1c8f590a19d182a25fd8671156766ab3c60" }, + { + "ImportPath": "github.com/containers/storage/pkg/fileutils", + "Rev": "ff8a6d2bf496daf46ab1a153f783a0f6b8762a54" + }, { "ImportPath": "github.com/containers/storage/pkg/homedir", - "Rev": "87cb51ae7a688abb71dbe6e4ac5c962d9121b862" + "Rev": "ff8a6d2bf496daf46ab1a153f783a0f6b8762a54" + }, + { + "ImportPath": "github.com/containers/storage/pkg/idtools", + "Rev": "ff8a6d2bf496daf46ab1a153f783a0f6b8762a54" + }, + { + "ImportPath": "github.com/containers/storage/pkg/mount", + "Rev": "ff8a6d2bf496daf46ab1a153f783a0f6b8762a54" + }, + { + "ImportPath": "github.com/containers/storage/pkg/system", + "Rev": "ff8a6d2bf496daf46ab1a153f783a0f6b8762a54" + }, + { + "ImportPath": "github.com/coreos/bbolt", + "Comment": "v1.3.1-coreos.6", + "Rev": "48ea1b39c25fc1bab3506fbc712ecbaa842c4d2d" }, { "ImportPath": "github.com/coreos/etcd/alarm", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { 
"ImportPath": "github.com/coreos/etcd/auth", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/auth/authpb", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/client", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/clientv3", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" + }, + { + "ImportPath": "github.com/coreos/etcd/clientv3/concurrency", + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/compactor", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/discovery", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/embed", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/error", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/etcdserver", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/api", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" + }, + { + "ImportPath": "github.com/coreos/etcd/etcdserver/api/etcdhttp", + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/api/v2http", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/api/v2http/httptypes", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" + }, + { + "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3client", + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" + }, + { + "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3election", + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" + }, + { + "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb", + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" + }, + { + "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw", + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" + }, + { + "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3lock", + "Comment": "v3.2.13", + "Rev": 
"95a726a27e09030f9ccbd9982a1508f5a6d25ada" + }, + { + "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb", + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" + }, + { + "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw", + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3rpc", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/auth", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/etcdserverpb", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" + }, + { + "ImportPath": "github.com/coreos/etcd/etcdserver/etcdserverpb/gw", + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/membership", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/etcdserver/stats", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/lease", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/lease/leasehttp", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/lease/leasepb", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/mvcc", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/mvcc/backend", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/mvcc/mvccpb", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/adt", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/contention", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/cors", - 
"Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/cpuutil", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/crc", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" + }, + { + "ImportPath": "github.com/coreos/etcd/pkg/debugutil", + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/fileutil", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/httputil", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/idutil", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/ioutil", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/logutil", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/monotime", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/netutil", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/pathutil", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/pbutil", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/runtime", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/schedule", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" + }, + { + "ImportPath": "github.com/coreos/etcd/pkg/srv", + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/tlsutil", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/transport", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": 
"95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/types", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/pkg/wait", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" + }, + { + "ImportPath": "github.com/coreos/etcd/proxy/grpcproxy/adapter", + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/raft", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/raft/raftpb", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/rafthttp", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/snap", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/snap/snappb", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/store", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/version", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/wal", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { "ImportPath": "github.com/coreos/etcd/wal/walpb", - "Comment": "v3.1.10", - "Rev": "0520cb9304cb2385f7e72b8bc02d6e4d3257158a" + "Comment": "v3.2.13", + "Rev": "95a726a27e09030f9ccbd9982a1508f5a6d25ada" }, { - "ImportPath": "github.com/coreos/go-oidc/http", - "Rev": "a4973d9a4225417aecf5d450a9522f00c1f7130f" - }, - { - "ImportPath": "github.com/coreos/go-oidc/jose", - "Rev": "a4973d9a4225417aecf5d450a9522f00c1f7130f" - }, - { - "ImportPath": "github.com/coreos/go-oidc/key", - "Rev": "a4973d9a4225417aecf5d450a9522f00c1f7130f" - }, - { - "ImportPath": "github.com/coreos/go-oidc/oauth2", - "Rev": "a4973d9a4225417aecf5d450a9522f00c1f7130f" - }, - { - "ImportPath": "github.com/coreos/go-oidc/oidc", - "Rev": "a4973d9a4225417aecf5d450a9522f00c1f7130f" + "ImportPath": "github.com/coreos/go-oidc", + "Rev": "065b426bd41667456c1a924468f507673629c46b" }, { "ImportPath": "github.com/coreos/go-semver/semver", @@ -896,21 +971,6 @@ "Comment": "v2-8-gfa29b1d", "Rev": "fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8" }, - { - "ImportPath": "github.com/coreos/pkg/health", - "Comment": "v2-8-gfa29b1d", - "Rev": "fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8" - }, - { - "ImportPath": "github.com/coreos/pkg/httputil", - "Comment": "v2-8-gfa29b1d", - "Rev": "fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8" - }, - { - 
"ImportPath": "github.com/coreos/pkg/timeutil", - "Comment": "v2-8-gfa29b1d", - "Rev": "fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8" - }, { "ImportPath": "github.com/coreos/rkt/api/v1alpha", "Comment": "v1.25.0", @@ -921,6 +981,11 @@ "Comment": "v1.0.4", "Rev": "71acacd42f85e5e82f70a55327789582a5200a90" }, + { + "ImportPath": "github.com/cyphar/filepath-securejoin", + "Comment": "v0.2.1-1-gae69057", + "Rev": "ae69057f2299fb9e5ba2df738607e6a505b74ab6" + }, { "ImportPath": "github.com/d2g/dhcp4", "Rev": "a1d1b6c41b1ce8a71a5121a9cee31809c4707d9c" @@ -1133,13 +1198,18 @@ "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1", "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" }, + { + "ImportPath": "github.com/docker/docker/pkg/parsers", + "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1", + "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" + }, { "ImportPath": "github.com/docker/docker/pkg/stdcopy", "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1", "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" }, { - "ImportPath": "github.com/docker/docker/pkg/symlink", + "ImportPath": "github.com/docker/docker/pkg/sysinfo", "Comment": "docs-v1.12.0-rc4-2016-07-15-7401-g4f3616fb1", "Rev": "4f3616fb1c112e206b88cb7a9922bf49067a7756" }, @@ -1194,163 +1264,163 @@ }, { "ImportPath": "github.com/docker/machine/commands/mcndirs", - "Comment": "docs-v0.8.2-2016-09-26-292-g49dfaa70", - "Rev": "49dfaa70fdc869c65d9f6c50c355624356ab383b" + "Comment": "docs-v0.8.2-2016-09-26-351-ga950319e", + "Rev": "a950319ea4041615add4a243e684b1baadb6436f" }, { "ImportPath": "github.com/docker/machine/drivers/errdriver", - "Comment": "docs-v0.8.2-2016-09-26-292-g49dfaa70", - "Rev": "49dfaa70fdc869c65d9f6c50c355624356ab383b" + "Comment": "docs-v0.8.2-2016-09-26-351-ga950319e", + "Rev": "a950319ea4041615add4a243e684b1baadb6436f" }, { "ImportPath": "github.com/docker/machine/drivers/hyperv", - "Comment": "docs-v0.8.2-2016-09-26-292-g49dfaa70", - "Rev": "49dfaa70fdc869c65d9f6c50c355624356ab383b" + "Comment": "docs-v0.8.2-2016-09-26-351-ga950319e", + "Rev": "a950319ea4041615add4a243e684b1baadb6436f" }, { "ImportPath": "github.com/docker/machine/drivers/none", - "Comment": "docs-v0.8.2-2016-09-26-292-g49dfaa70", - "Rev": "49dfaa70fdc869c65d9f6c50c355624356ab383b" + "Comment": "docs-v0.8.2-2016-09-26-351-ga950319e", + "Rev": "a950319ea4041615add4a243e684b1baadb6436f" }, { "ImportPath": "github.com/docker/machine/drivers/virtualbox", - "Comment": "docs-v0.8.2-2016-09-26-292-g49dfaa70", - "Rev": "49dfaa70fdc869c65d9f6c50c355624356ab383b" + "Comment": "docs-v0.8.2-2016-09-26-351-ga950319e", + "Rev": "a950319ea4041615add4a243e684b1baadb6436f" }, { "ImportPath": "github.com/docker/machine/drivers/vmwarefusion", - "Comment": "docs-v0.8.2-2016-09-26-292-g49dfaa70", - "Rev": "49dfaa70fdc869c65d9f6c50c355624356ab383b" + "Comment": "docs-v0.8.2-2016-09-26-351-ga950319e", + "Rev": "a950319ea4041615add4a243e684b1baadb6436f" }, { "ImportPath": "github.com/docker/machine/libmachine", - "Comment": "docs-v0.8.2-2016-09-26-292-g49dfaa70", - "Rev": "49dfaa70fdc869c65d9f6c50c355624356ab383b" + "Comment": "docs-v0.8.2-2016-09-26-351-ga950319e", + "Rev": "a950319ea4041615add4a243e684b1baadb6436f" }, { "ImportPath": "github.com/docker/machine/libmachine/auth", - "Comment": "docs-v0.8.2-2016-09-26-292-g49dfaa70", - "Rev": "49dfaa70fdc869c65d9f6c50c355624356ab383b" + "Comment": "docs-v0.8.2-2016-09-26-351-ga950319e", + "Rev": "a950319ea4041615add4a243e684b1baadb6436f" }, { "ImportPath": "github.com/docker/machine/libmachine/cert", - "Comment": 
"docs-v0.8.2-2016-09-26-292-g49dfaa70", - "Rev": "49dfaa70fdc869c65d9f6c50c355624356ab383b" + "Comment": "docs-v0.8.2-2016-09-26-351-ga950319e", + "Rev": "a950319ea4041615add4a243e684b1baadb6436f" }, { "ImportPath": "github.com/docker/machine/libmachine/check", - "Comment": "docs-v0.8.2-2016-09-26-292-g49dfaa70", - "Rev": "49dfaa70fdc869c65d9f6c50c355624356ab383b" + "Comment": "docs-v0.8.2-2016-09-26-351-ga950319e", + "Rev": "a950319ea4041615add4a243e684b1baadb6436f" }, { "ImportPath": "github.com/docker/machine/libmachine/drivers", - "Comment": "docs-v0.8.2-2016-09-26-292-g49dfaa70", - "Rev": "49dfaa70fdc869c65d9f6c50c355624356ab383b" + "Comment": "docs-v0.8.2-2016-09-26-351-ga950319e", + "Rev": "a950319ea4041615add4a243e684b1baadb6436f" }, { "ImportPath": "github.com/docker/machine/libmachine/drivers/plugin", - "Comment": "docs-v0.8.2-2016-09-26-292-g49dfaa70", - "Rev": "49dfaa70fdc869c65d9f6c50c355624356ab383b" + "Comment": "docs-v0.8.2-2016-09-26-351-ga950319e", + "Rev": "a950319ea4041615add4a243e684b1baadb6436f" }, { "ImportPath": "github.com/docker/machine/libmachine/drivers/plugin/localbinary", - "Comment": "docs-v0.8.2-2016-09-26-292-g49dfaa70", - "Rev": "49dfaa70fdc869c65d9f6c50c355624356ab383b" + "Comment": "docs-v0.8.2-2016-09-26-351-ga950319e", + "Rev": "a950319ea4041615add4a243e684b1baadb6436f" }, { "ImportPath": "github.com/docker/machine/libmachine/drivers/rpc", - "Comment": "docs-v0.8.2-2016-09-26-292-g49dfaa70", - "Rev": "49dfaa70fdc869c65d9f6c50c355624356ab383b" + "Comment": "docs-v0.8.2-2016-09-26-351-ga950319e", + "Rev": "a950319ea4041615add4a243e684b1baadb6436f" }, { "ImportPath": "github.com/docker/machine/libmachine/engine", - "Comment": "docs-v0.8.2-2016-09-26-292-g49dfaa70", - "Rev": "49dfaa70fdc869c65d9f6c50c355624356ab383b" + "Comment": "docs-v0.8.2-2016-09-26-351-ga950319e", + "Rev": "a950319ea4041615add4a243e684b1baadb6436f" }, { "ImportPath": "github.com/docker/machine/libmachine/host", - "Comment": "docs-v0.8.2-2016-09-26-292-g49dfaa70", - "Rev": "49dfaa70fdc869c65d9f6c50c355624356ab383b" + "Comment": "docs-v0.8.2-2016-09-26-351-ga950319e", + "Rev": "a950319ea4041615add4a243e684b1baadb6436f" }, { "ImportPath": "github.com/docker/machine/libmachine/log", - "Comment": "docs-v0.8.2-2016-09-26-292-g49dfaa70", - "Rev": "49dfaa70fdc869c65d9f6c50c355624356ab383b" + "Comment": "docs-v0.8.2-2016-09-26-351-ga950319e", + "Rev": "a950319ea4041615add4a243e684b1baadb6436f" }, { "ImportPath": "github.com/docker/machine/libmachine/mcndockerclient", - "Comment": "docs-v0.8.2-2016-09-26-292-g49dfaa70", - "Rev": "49dfaa70fdc869c65d9f6c50c355624356ab383b" + "Comment": "docs-v0.8.2-2016-09-26-351-ga950319e", + "Rev": "a950319ea4041615add4a243e684b1baadb6436f" }, { "ImportPath": "github.com/docker/machine/libmachine/mcnerror", - "Comment": "docs-v0.8.2-2016-09-26-292-g49dfaa70", - "Rev": "49dfaa70fdc869c65d9f6c50c355624356ab383b" + "Comment": "docs-v0.8.2-2016-09-26-351-ga950319e", + "Rev": "a950319ea4041615add4a243e684b1baadb6436f" }, { "ImportPath": "github.com/docker/machine/libmachine/mcnflag", - "Comment": "docs-v0.8.2-2016-09-26-292-g49dfaa70", - "Rev": "49dfaa70fdc869c65d9f6c50c355624356ab383b" + "Comment": "docs-v0.8.2-2016-09-26-351-ga950319e", + "Rev": "a950319ea4041615add4a243e684b1baadb6436f" }, { "ImportPath": "github.com/docker/machine/libmachine/mcnutils", - "Comment": "docs-v0.8.2-2016-09-26-292-g49dfaa70", - "Rev": "49dfaa70fdc869c65d9f6c50c355624356ab383b" + "Comment": "docs-v0.8.2-2016-09-26-351-ga950319e", + "Rev": "a950319ea4041615add4a243e684b1baadb6436f" }, 
{ "ImportPath": "github.com/docker/machine/libmachine/persist", - "Comment": "docs-v0.8.2-2016-09-26-292-g49dfaa70", - "Rev": "49dfaa70fdc869c65d9f6c50c355624356ab383b" + "Comment": "docs-v0.8.2-2016-09-26-351-ga950319e", + "Rev": "a950319ea4041615add4a243e684b1baadb6436f" }, { "ImportPath": "github.com/docker/machine/libmachine/provision", - "Comment": "docs-v0.8.2-2016-09-26-292-g49dfaa70", - "Rev": "49dfaa70fdc869c65d9f6c50c355624356ab383b" + "Comment": "docs-v0.8.2-2016-09-26-351-ga950319e", + "Rev": "a950319ea4041615add4a243e684b1baadb6436f" }, { "ImportPath": "github.com/docker/machine/libmachine/provision/pkgaction", - "Comment": "docs-v0.8.2-2016-09-26-292-g49dfaa70", - "Rev": "49dfaa70fdc869c65d9f6c50c355624356ab383b" + "Comment": "docs-v0.8.2-2016-09-26-351-ga950319e", + "Rev": "a950319ea4041615add4a243e684b1baadb6436f" }, { "ImportPath": "github.com/docker/machine/libmachine/provision/serviceaction", - "Comment": "docs-v0.8.2-2016-09-26-292-g49dfaa70", - "Rev": "49dfaa70fdc869c65d9f6c50c355624356ab383b" + "Comment": "docs-v0.8.2-2016-09-26-351-ga950319e", + "Rev": "a950319ea4041615add4a243e684b1baadb6436f" }, { "ImportPath": "github.com/docker/machine/libmachine/shell", - "Comment": "docs-v0.8.2-2016-09-26-292-g49dfaa70", - "Rev": "49dfaa70fdc869c65d9f6c50c355624356ab383b" + "Comment": "docs-v0.8.2-2016-09-26-351-ga950319e", + "Rev": "a950319ea4041615add4a243e684b1baadb6436f" }, { "ImportPath": "github.com/docker/machine/libmachine/ssh", - "Comment": "docs-v0.8.2-2016-09-26-292-g49dfaa70", - "Rev": "49dfaa70fdc869c65d9f6c50c355624356ab383b" + "Comment": "docs-v0.8.2-2016-09-26-351-ga950319e", + "Rev": "a950319ea4041615add4a243e684b1baadb6436f" }, { "ImportPath": "github.com/docker/machine/libmachine/state", - "Comment": "docs-v0.8.2-2016-09-26-292-g49dfaa70", - "Rev": "49dfaa70fdc869c65d9f6c50c355624356ab383b" + "Comment": "docs-v0.8.2-2016-09-26-351-ga950319e", + "Rev": "a950319ea4041615add4a243e684b1baadb6436f" }, { "ImportPath": "github.com/docker/machine/libmachine/swarm", - "Comment": "docs-v0.8.2-2016-09-26-292-g49dfaa70", - "Rev": "49dfaa70fdc869c65d9f6c50c355624356ab383b" + "Comment": "docs-v0.8.2-2016-09-26-351-ga950319e", + "Rev": "a950319ea4041615add4a243e684b1baadb6436f" }, { "ImportPath": "github.com/docker/machine/libmachine/version", - "Comment": "docs-v0.8.2-2016-09-26-292-g49dfaa70", - "Rev": "49dfaa70fdc869c65d9f6c50c355624356ab383b" + "Comment": "docs-v0.8.2-2016-09-26-351-ga950319e", + "Rev": "a950319ea4041615add4a243e684b1baadb6436f" }, { "ImportPath": "github.com/docker/machine/libmachine/versioncmp", - "Comment": "docs-v0.8.2-2016-09-26-292-g49dfaa70", - "Rev": "49dfaa70fdc869c65d9f6c50c355624356ab383b" + "Comment": "docs-v0.8.2-2016-09-26-351-ga950319e", + "Rev": "a950319ea4041615add4a243e684b1baadb6436f" }, { "ImportPath": "github.com/docker/machine/version", - "Comment": "docs-v0.8.2-2016-09-26-292-g49dfaa70", - "Rev": "49dfaa70fdc869c65d9f6c50c355624356ab383b" + "Comment": "docs-v0.8.2-2016-09-26-351-ga950319e", + "Rev": "a950319ea4041615add4a243e684b1baadb6436f" }, { "ImportPath": "github.com/docker/spdystream", @@ -1436,7 +1506,7 @@ }, { "ImportPath": "github.com/go-openapi/spec", - "Rev": "7abd5745472fff5eb3685386d5fb8bf38683154d" + "Rev": "1de3e0542de65ad8d75452a595886fdd0befb363" }, { "ImportPath": "github.com/go-openapi/strfmt", @@ -1448,7 +1518,7 @@ }, { "ImportPath": "github.com/go-openapi/validate", - "Rev": "deaf2c9013bc1a7f4c774662259a506ba874d80f" + "Rev": "d509235108fcf6ab4913d2dcb3a2260c0db2108e" }, { "ImportPath": 
"github.com/godbus/dbus", @@ -1496,6 +1566,10 @@ "ImportPath": "github.com/golang/protobuf/proto", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" }, + { + "ImportPath": "github.com/golang/protobuf/protoc-gen-go/descriptor", + "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" + }, { "ImportPath": "github.com/golang/protobuf/ptypes", "Rev": "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" @@ -1526,213 +1600,213 @@ }, { "ImportPath": "github.com/google/cadvisor/accelerators", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.29.1", + "Rev": "2e02d28350c5fbbad9cfb7e5a1733468b75ab3f9" }, { "ImportPath": "github.com/google/cadvisor/api", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.29.1", + "Rev": "2e02d28350c5fbbad9cfb7e5a1733468b75ab3f9" }, { "ImportPath": "github.com/google/cadvisor/cache/memory", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.29.1", + "Rev": "2e02d28350c5fbbad9cfb7e5a1733468b75ab3f9" }, { "ImportPath": "github.com/google/cadvisor/collector", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.29.1", + "Rev": "2e02d28350c5fbbad9cfb7e5a1733468b75ab3f9" }, { "ImportPath": "github.com/google/cadvisor/container", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.29.1", + "Rev": "2e02d28350c5fbbad9cfb7e5a1733468b75ab3f9" }, { "ImportPath": "github.com/google/cadvisor/container/common", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.29.1", + "Rev": "2e02d28350c5fbbad9cfb7e5a1733468b75ab3f9" }, { "ImportPath": "github.com/google/cadvisor/container/containerd", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.29.1", + "Rev": "2e02d28350c5fbbad9cfb7e5a1733468b75ab3f9" }, { "ImportPath": "github.com/google/cadvisor/container/crio", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.29.1", + "Rev": "2e02d28350c5fbbad9cfb7e5a1733468b75ab3f9" }, { "ImportPath": "github.com/google/cadvisor/container/docker", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.29.1", + "Rev": "2e02d28350c5fbbad9cfb7e5a1733468b75ab3f9" }, { "ImportPath": "github.com/google/cadvisor/container/libcontainer", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.29.1", + "Rev": "2e02d28350c5fbbad9cfb7e5a1733468b75ab3f9" }, { "ImportPath": "github.com/google/cadvisor/container/raw", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.29.1", + "Rev": "2e02d28350c5fbbad9cfb7e5a1733468b75ab3f9" }, { "ImportPath": "github.com/google/cadvisor/container/rkt", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.29.1", + "Rev": "2e02d28350c5fbbad9cfb7e5a1733468b75ab3f9" }, { "ImportPath": "github.com/google/cadvisor/container/systemd", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.29.1", + "Rev": "2e02d28350c5fbbad9cfb7e5a1733468b75ab3f9" }, { "ImportPath": "github.com/google/cadvisor/devicemapper", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.29.1", + "Rev": "2e02d28350c5fbbad9cfb7e5a1733468b75ab3f9" }, { "ImportPath": "github.com/google/cadvisor/events", - "Comment": "v0.28.3", - "Rev": 
"1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.29.1", + "Rev": "2e02d28350c5fbbad9cfb7e5a1733468b75ab3f9" }, { "ImportPath": "github.com/google/cadvisor/fs", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.29.1", + "Rev": "2e02d28350c5fbbad9cfb7e5a1733468b75ab3f9" }, { "ImportPath": "github.com/google/cadvisor/healthz", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.29.1", + "Rev": "2e02d28350c5fbbad9cfb7e5a1733468b75ab3f9" }, { "ImportPath": "github.com/google/cadvisor/http", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.29.1", + "Rev": "2e02d28350c5fbbad9cfb7e5a1733468b75ab3f9" }, { "ImportPath": "github.com/google/cadvisor/http/mux", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.29.1", + "Rev": "2e02d28350c5fbbad9cfb7e5a1733468b75ab3f9" }, { "ImportPath": "github.com/google/cadvisor/info/v1", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.29.1", + "Rev": "2e02d28350c5fbbad9cfb7e5a1733468b75ab3f9" }, { "ImportPath": "github.com/google/cadvisor/info/v2", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.29.1", + "Rev": "2e02d28350c5fbbad9cfb7e5a1733468b75ab3f9" }, { "ImportPath": "github.com/google/cadvisor/machine", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.29.1", + "Rev": "2e02d28350c5fbbad9cfb7e5a1733468b75ab3f9" }, { "ImportPath": "github.com/google/cadvisor/manager", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.29.1", + "Rev": "2e02d28350c5fbbad9cfb7e5a1733468b75ab3f9" }, { "ImportPath": "github.com/google/cadvisor/manager/watcher", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.29.1", + "Rev": "2e02d28350c5fbbad9cfb7e5a1733468b75ab3f9" }, { "ImportPath": "github.com/google/cadvisor/manager/watcher/raw", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.29.1", + "Rev": "2e02d28350c5fbbad9cfb7e5a1733468b75ab3f9" }, { "ImportPath": "github.com/google/cadvisor/manager/watcher/rkt", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.29.1", + "Rev": "2e02d28350c5fbbad9cfb7e5a1733468b75ab3f9" }, { "ImportPath": "github.com/google/cadvisor/metrics", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.29.1", + "Rev": "2e02d28350c5fbbad9cfb7e5a1733468b75ab3f9" }, { "ImportPath": "github.com/google/cadvisor/pages", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.29.1", + "Rev": "2e02d28350c5fbbad9cfb7e5a1733468b75ab3f9" }, { "ImportPath": "github.com/google/cadvisor/pages/static", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.29.1", + "Rev": "2e02d28350c5fbbad9cfb7e5a1733468b75ab3f9" }, { "ImportPath": "github.com/google/cadvisor/storage", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.29.1", + "Rev": "2e02d28350c5fbbad9cfb7e5a1733468b75ab3f9" }, { "ImportPath": "github.com/google/cadvisor/summary", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.29.1", + "Rev": "2e02d28350c5fbbad9cfb7e5a1733468b75ab3f9" }, { "ImportPath": "github.com/google/cadvisor/utils", - 
"Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.29.1", + "Rev": "2e02d28350c5fbbad9cfb7e5a1733468b75ab3f9" }, { "ImportPath": "github.com/google/cadvisor/utils/cloudinfo", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.29.1", + "Rev": "2e02d28350c5fbbad9cfb7e5a1733468b75ab3f9" }, { "ImportPath": "github.com/google/cadvisor/utils/cpuload", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.29.1", + "Rev": "2e02d28350c5fbbad9cfb7e5a1733468b75ab3f9" }, { "ImportPath": "github.com/google/cadvisor/utils/cpuload/netlink", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.29.1", + "Rev": "2e02d28350c5fbbad9cfb7e5a1733468b75ab3f9" }, { "ImportPath": "github.com/google/cadvisor/utils/docker", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.29.1", + "Rev": "2e02d28350c5fbbad9cfb7e5a1733468b75ab3f9" }, { "ImportPath": "github.com/google/cadvisor/utils/oomparser", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.29.1", + "Rev": "2e02d28350c5fbbad9cfb7e5a1733468b75ab3f9" }, { "ImportPath": "github.com/google/cadvisor/utils/sysfs", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.29.1", + "Rev": "2e02d28350c5fbbad9cfb7e5a1733468b75ab3f9" }, { "ImportPath": "github.com/google/cadvisor/utils/sysinfo", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.29.1", + "Rev": "2e02d28350c5fbbad9cfb7e5a1733468b75ab3f9" }, { "ImportPath": "github.com/google/cadvisor/validate", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.29.1", + "Rev": "2e02d28350c5fbbad9cfb7e5a1733468b75ab3f9" }, { "ImportPath": "github.com/google/cadvisor/version", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.29.1", + "Rev": "2e02d28350c5fbbad9cfb7e5a1733468b75ab3f9" }, { "ImportPath": "github.com/google/cadvisor/zfs", - "Comment": "v0.28.3", - "Rev": "1e567c2ac359c3ed1303e0c80b6cf08edefc841d" + "Comment": "v0.29.1", + "Rev": "2e02d28350c5fbbad9cfb7e5a1733468b75ab3f9" }, { "ImportPath": "github.com/google/certificate-transparency/go", @@ -1772,123 +1846,123 @@ }, { "ImportPath": "github.com/gophercloud/gophercloud", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "6da026c32e2d622cc242d32984259c77237aefe1" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "6da026c32e2d622cc242d32984259c77237aefe1" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "6da026c32e2d622cc242d32984259c77237aefe1" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "6da026c32e2d622cc242d32984259c77237aefe1" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "6da026c32e2d622cc242d32984259c77237aefe1" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "6da026c32e2d622cc242d32984259c77237aefe1" }, { "ImportPath": 
"github.com/gophercloud/gophercloud/openstack/common/extensions", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "6da026c32e2d622cc242d32984259c77237aefe1" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "6da026c32e2d622cc242d32984259c77237aefe1" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "6da026c32e2d622cc242d32984259c77237aefe1" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/compute/v2/flavors", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "6da026c32e2d622cc242d32984259c77237aefe1" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/compute/v2/images", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "6da026c32e2d622cc242d32984259c77237aefe1" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/compute/v2/servers", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "6da026c32e2d622cc242d32984259c77237aefe1" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/identity/v2/tenants", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "6da026c32e2d622cc242d32984259c77237aefe1" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/identity/v2/tokens", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "6da026c32e2d622cc242d32984259c77237aefe1" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/trusts", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "6da026c32e2d622cc242d32984259c77237aefe1" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "6da026c32e2d622cc242d32984259c77237aefe1" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "6da026c32e2d622cc242d32984259c77237aefe1" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/external", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "6da026c32e2d622cc242d32984259c77237aefe1" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "6da026c32e2d622cc242d32984259c77237aefe1" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "6da026c32e2d622cc242d32984259c77237aefe1" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "6da026c32e2d622cc242d32984259c77237aefe1" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "6da026c32e2d622cc242d32984259c77237aefe1" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "6da026c32e2d622cc242d32984259c77237aefe1" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools", - "Rev": 
"8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "6da026c32e2d622cc242d32984259c77237aefe1" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "6da026c32e2d622cc242d32984259c77237aefe1" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "6da026c32e2d622cc242d32984259c77237aefe1" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/networking/v2/networks", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "6da026c32e2d622cc242d32984259c77237aefe1" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/networking/v2/ports", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "6da026c32e2d622cc242d32984259c77237aefe1" }, { "ImportPath": "github.com/gophercloud/gophercloud/openstack/utils", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "6da026c32e2d622cc242d32984259c77237aefe1" }, { "ImportPath": "github.com/gophercloud/gophercloud/pagination", - "Rev": "8183543f90d1aef267a5ecc209f2e0715b355acb" + "Rev": "6da026c32e2d622cc242d32984259c77237aefe1" }, { "ImportPath": "github.com/gorilla/mux", @@ -1914,18 +1988,18 @@ }, { "ImportPath": "github.com/grpc-ecosystem/grpc-gateway/runtime", - "Comment": "v1.1.0-25-g84398b9", - "Rev": "84398b94e188ee336f307779b57b3aa91af7063c" + "Comment": "v1.3.0", + "Rev": "8cc3a55af3bcf171a1c23a90c4df9cf591706104" }, { "ImportPath": "github.com/grpc-ecosystem/grpc-gateway/runtime/internal", - "Comment": "v1.1.0-25-g84398b9", - "Rev": "84398b94e188ee336f307779b57b3aa91af7063c" + "Comment": "v1.3.0", + "Rev": "8cc3a55af3bcf171a1c23a90c4df9cf591706104" }, { "ImportPath": "github.com/grpc-ecosystem/grpc-gateway/utilities", - "Comment": "v1.1.0-25-g84398b9", - "Rev": "84398b94e188ee336f307779b57b3aa91af7063c" + "Comment": "v1.3.0", + "Rev": "8cc3a55af3bcf171a1c23a90c4df9cf591706104" }, { "ImportPath": "github.com/hashicorp/errwrap", @@ -2052,12 +2126,8 @@ }, { "ImportPath": "github.com/json-iterator/go", - "Comment": "1.0.0", - "Rev": "36b14963da70d11297d313183d7e6388c8510e1e" - }, - { - "ImportPath": "github.com/juju/ratelimit", - "Rev": "5b9ff866471762aa2ab2dced63c9fb6f53921342" + "Comment": "1.0.4-7-g13f8643", + "Rev": "13f86432b882000a51c6e610c620974462691a97" }, { "ImportPath": "github.com/kardianos/osext", @@ -2127,6 +2197,11 @@ "ImportPath": "github.com/mailru/easyjson/jwriter", "Rev": "2f5df55504ebc322e4d52d34df6a1f5b503bf26d" }, + { + "ImportPath": "github.com/marstr/guid", + "Comment": "v1.1.0-2-g8bdf7d1", + "Rev": "8bdf7d1a087ccc975cf37dd6507da50698fd19ca" + }, { "ImportPath": "github.com/mattn/go-runewidth", "Comment": "v0.0.1-10-g737072b", @@ -2206,78 +2281,83 @@ }, { "ImportPath": "github.com/opencontainers/runc/libcontainer", - "Comment": "v1.0.0-rc4-50-g4d6e6720", - "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" + "Comment": "v1.0.0-rc4-221-g595bea02", + "Rev": "595bea022f077a9e17d7473b34fbaf1adaed9e43" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/apparmor", - "Comment": "v1.0.0-rc4-50-g4d6e6720", - "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" + "Comment": "v1.0.0-rc4-221-g595bea02", + "Rev": "595bea022f077a9e17d7473b34fbaf1adaed9e43" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/cgroups", - "Comment": "v1.0.0-rc4-50-g4d6e6720", - "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" + "Comment": "v1.0.0-rc4-221-g595bea02", + 
"Rev": "595bea022f077a9e17d7473b34fbaf1adaed9e43" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/cgroups/fs", - "Comment": "v1.0.0-rc4-50-g4d6e6720", - "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" - }, - { - "ImportPath": "github.com/opencontainers/runc/libcontainer/cgroups/rootless", - "Comment": "v1.0.0-rc4-50-g4d6e6720", - "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" + "Comment": "v1.0.0-rc4-221-g595bea02", + "Rev": "595bea022f077a9e17d7473b34fbaf1adaed9e43" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/cgroups/systemd", - "Comment": "v1.0.0-rc4-50-g4d6e6720", - "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" + "Comment": "v1.0.0-rc4-221-g595bea02", + "Rev": "595bea022f077a9e17d7473b34fbaf1adaed9e43" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/configs", - "Comment": "v1.0.0-rc4-50-g4d6e6720", - "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" + "Comment": "v1.0.0-rc4-221-g595bea02", + "Rev": "595bea022f077a9e17d7473b34fbaf1adaed9e43" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/configs/validate", - "Comment": "v1.0.0-rc4-50-g4d6e6720", - "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" + "Comment": "v1.0.0-rc4-221-g595bea02", + "Rev": "595bea022f077a9e17d7473b34fbaf1adaed9e43" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/criurpc", - "Comment": "v1.0.0-rc4-50-g4d6e6720", - "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" + "Comment": "v1.0.0-rc4-221-g595bea02", + "Rev": "595bea022f077a9e17d7473b34fbaf1adaed9e43" + }, + { + "ImportPath": "github.com/opencontainers/runc/libcontainer/intelrdt", + "Comment": "v1.0.0-rc4-221-g595bea02", + "Rev": "595bea022f077a9e17d7473b34fbaf1adaed9e43" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/keys", - "Comment": "v1.0.0-rc4-50-g4d6e6720", - "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" + "Comment": "v1.0.0-rc4-221-g595bea02", + "Rev": "595bea022f077a9e17d7473b34fbaf1adaed9e43" + }, + { + "ImportPath": "github.com/opencontainers/runc/libcontainer/mount", + "Comment": "v1.0.0-rc4-221-g595bea02", + "Rev": "595bea022f077a9e17d7473b34fbaf1adaed9e43" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/seccomp", - "Comment": "v1.0.0-rc4-50-g4d6e6720", - "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" + "Comment": "v1.0.0-rc4-221-g595bea02", + "Rev": "595bea022f077a9e17d7473b34fbaf1adaed9e43" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/stacktrace", - "Comment": "v1.0.0-rc4-50-g4d6e6720", - "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" + "Comment": "v1.0.0-rc4-221-g595bea02", + "Rev": "595bea022f077a9e17d7473b34fbaf1adaed9e43" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/system", - "Comment": "v1.0.0-rc4-50-g4d6e6720", - "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" + "Comment": "v1.0.0-rc4-221-g595bea02", + "Rev": "595bea022f077a9e17d7473b34fbaf1adaed9e43" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/user", - "Comment": "v1.0.0-rc4-50-g4d6e6720", - "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" + "Comment": "v1.0.0-rc4-221-g595bea02", + "Rev": "595bea022f077a9e17d7473b34fbaf1adaed9e43" }, { "ImportPath": "github.com/opencontainers/runc/libcontainer/utils", - "Comment": "v1.0.0-rc4-50-g4d6e6720", - "Rev": "4d6e6720a7c885c37b4cb083c0d372dda3425120" + "Comment": "v1.0.0-rc4-221-g595bea02", + "Rev": "595bea022f077a9e17d7473b34fbaf1adaed9e43" }, { "ImportPath": "github.com/opencontainers/runtime-spec/specs-go", @@ -2331,6 +2411,14 @@ "ImportPath": 
"github.com/pkg/sftp", "Rev": "4d0e916071f68db74f8a73926335f809396d6b42" }, + { + "ImportPath": "github.com/pquerna/cachecontrol", + "Rev": "0dec1b30a0215bb68605dfc568e8855066c9202d" + }, + { + "ImportPath": "github.com/pquerna/cachecontrol/cacheobject", + "Rev": "0dec1b30a0215bb68605dfc568e8855066c9202d" + }, { "ImportPath": "github.com/prometheus/client_golang/prometheus", "Comment": "v0.8.0-83-ge7e9030", @@ -2368,7 +2456,7 @@ }, { "ImportPath": "github.com/quobyte/api", - "Rev": "cb10db90715b14d4784465d2fa3b915dfacc0628" + "Rev": "f2b94aa4aa4f8fcf279fe667ccd916abe6a064d5" }, { "ImportPath": "github.com/r2d4/external-storage/lib/controller", @@ -2414,9 +2502,9 @@ "Rev": "91d7393ff85980ba3a8966405871a3d446ca28f2" }, { - "ImportPath": "github.com/satori/uuid", - "Comment": "v1.1.0-8-g5bf94b6", - "Rev": "5bf94b69c6b68ee1b541973bb8e1144db23a194b" + "ImportPath": "github.com/satori/go.uuid", + "Comment": "v1.1.0", + "Rev": "879c5887cd475cd7864858769793b2ceb0d44feb" }, { "ImportPath": "github.com/seccomp/libseccomp-golang", @@ -2449,11 +2537,13 @@ }, { "ImportPath": "github.com/spf13/cobra", - "Rev": "f62e98d28ab7ad31d707ba837a966378465c7b57" + "Comment": "v0.0.1-32-g6644d46", + "Rev": "6644d46b81fa1831979c4cded0106e774e0ef0ab" }, { "ImportPath": "github.com/spf13/cobra/doc", - "Rev": "f62e98d28ab7ad31d707ba837a966378465c7b57" + "Comment": "v0.0.1-32-g6644d46", + "Rev": "6644d46b81fa1831979c4cded0106e774e0ef0ab" }, { "ImportPath": "github.com/spf13/jwalterweatherman", @@ -2461,7 +2551,8 @@ }, { "ImportPath": "github.com/spf13/pflag", - "Rev": "9ff6c6923cfffbcd502984b8e0c80539a94968b7" + "Comment": "v1.0.0-10-g4c012f6", + "Rev": "4c012f6dcd9546820e378d0bdda4d8fc772cdfea" }, { "ImportPath": "github.com/spf13/viper", @@ -2469,24 +2560,24 @@ "Rev": "25b30aa063fc18e48662b86996252eabdcf2f0c7" }, { - "ImportPath": "github.com/square/go-jose", - "Rev": "789a4c4bd4c118f7564954f441b29c153ccd6a96" + "ImportPath": "github.com/storageos/go-api", + "Comment": "0.3.4", + "Rev": "3a4032328d99c1b43fbda3d85bd3c80fa06e1707" }, { - "ImportPath": "github.com/square/go-jose/cipher", - "Rev": "789a4c4bd4c118f7564954f441b29c153ccd6a96" + "ImportPath": "github.com/storageos/go-api/netutil", + "Comment": "0.3.4", + "Rev": "3a4032328d99c1b43fbda3d85bd3c80fa06e1707" }, { - "ImportPath": "github.com/square/go-jose/json", - "Rev": "789a4c4bd4c118f7564954f441b29c153ccd6a96" - }, - { - "ImportPath": "github.com/storageos/go-api", - "Rev": "74f9beb613cacf0cc282facc2e1550a3231e126f" + "ImportPath": "github.com/storageos/go-api/serror", + "Comment": "0.3.4", + "Rev": "3a4032328d99c1b43fbda3d85bd3c80fa06e1707" }, { "ImportPath": "github.com/storageos/go-api/types", - "Rev": "74f9beb613cacf0cc282facc2e1550a3231e126f" + "Comment": "0.3.4", + "Rev": "3a4032328d99c1b43fbda3d85bd3c80fa06e1707" }, { "ImportPath": "github.com/syndtr/gocapability/capability", @@ -2498,105 +2589,110 @@ }, { "ImportPath": "github.com/vishvananda/netlink", - "Rev": "f5a6f697a596c788d474984a38a0ac4ba0719e93" + "Rev": "f67b75edbf5e3bb7dfe70bb788610693a71be3d1" }, { "ImportPath": "github.com/vishvananda/netlink/nl", - "Rev": "f5a6f697a596c788d474984a38a0ac4ba0719e93" + "Rev": "f67b75edbf5e3bb7dfe70bb788610693a71be3d1" }, { "ImportPath": "github.com/vishvananda/netns", - "Rev": "86bef332bfc3b59b7624a600bd53009ce91a9829" + "Rev": "be1fbeda19366dea804f00efff2dd73a1642fdcc" }, { "ImportPath": "github.com/vmware/govmomi", - "Comment": "v0.14.0-11-gb8b228c", - "Rev": "b8b228cfbad7f0a69ed90393ca9aee085d3c6ef1" + "Comment": "v0.16.0-97-g0f82f03", + "Rev": 
"0f82f03a2bbf14037d2331cf02f1d4157bbef6cc" }, { "ImportPath": "github.com/vmware/govmomi/find", - "Comment": "v0.14.0-11-gb8b228c", - "Rev": "b8b228cfbad7f0a69ed90393ca9aee085d3c6ef1" + "Comment": "v0.16.0-97-g0f82f03", + "Rev": "0f82f03a2bbf14037d2331cf02f1d4157bbef6cc" }, { "ImportPath": "github.com/vmware/govmomi/list", - "Comment": "v0.14.0-11-gb8b228c", - "Rev": "b8b228cfbad7f0a69ed90393ca9aee085d3c6ef1" + "Comment": "v0.16.0-97-g0f82f03", + "Rev": "0f82f03a2bbf14037d2331cf02f1d4157bbef6cc" + }, + { + "ImportPath": "github.com/vmware/govmomi/nfc", + "Comment": "v0.16.0-97-g0f82f03", + "Rev": "0f82f03a2bbf14037d2331cf02f1d4157bbef6cc" }, { "ImportPath": "github.com/vmware/govmomi/object", - "Comment": "v0.14.0-11-gb8b228c", - "Rev": "b8b228cfbad7f0a69ed90393ca9aee085d3c6ef1" + "Comment": "v0.16.0-97-g0f82f03", + "Rev": "0f82f03a2bbf14037d2331cf02f1d4157bbef6cc" }, { "ImportPath": "github.com/vmware/govmomi/pbm", - "Comment": "v0.14.0-11-gb8b228c", - "Rev": "b8b228cfbad7f0a69ed90393ca9aee085d3c6ef1" + "Comment": "v0.16.0-97-g0f82f03", + "Rev": "0f82f03a2bbf14037d2331cf02f1d4157bbef6cc" }, { "ImportPath": "github.com/vmware/govmomi/pbm/methods", - "Comment": "v0.14.0-11-gb8b228c", - "Rev": "b8b228cfbad7f0a69ed90393ca9aee085d3c6ef1" + "Comment": "v0.16.0-97-g0f82f03", + "Rev": "0f82f03a2bbf14037d2331cf02f1d4157bbef6cc" }, { "ImportPath": "github.com/vmware/govmomi/pbm/types", - "Comment": "v0.14.0-11-gb8b228c", - "Rev": "b8b228cfbad7f0a69ed90393ca9aee085d3c6ef1" + "Comment": "v0.16.0-97-g0f82f03", + "Rev": "0f82f03a2bbf14037d2331cf02f1d4157bbef6cc" }, { "ImportPath": "github.com/vmware/govmomi/property", - "Comment": "v0.14.0-11-gb8b228c", - "Rev": "b8b228cfbad7f0a69ed90393ca9aee085d3c6ef1" + "Comment": "v0.16.0-97-g0f82f03", + "Rev": "0f82f03a2bbf14037d2331cf02f1d4157bbef6cc" }, { "ImportPath": "github.com/vmware/govmomi/session", - "Comment": "v0.14.0-11-gb8b228c", - "Rev": "b8b228cfbad7f0a69ed90393ca9aee085d3c6ef1" + "Comment": "v0.16.0-97-g0f82f03", + "Rev": "0f82f03a2bbf14037d2331cf02f1d4157bbef6cc" }, { "ImportPath": "github.com/vmware/govmomi/task", - "Comment": "v0.14.0-11-gb8b228c", - "Rev": "b8b228cfbad7f0a69ed90393ca9aee085d3c6ef1" + "Comment": "v0.16.0-97-g0f82f03", + "Rev": "0f82f03a2bbf14037d2331cf02f1d4157bbef6cc" }, { "ImportPath": "github.com/vmware/govmomi/vim25", - "Comment": "v0.14.0-11-gb8b228c", - "Rev": "b8b228cfbad7f0a69ed90393ca9aee085d3c6ef1" + "Comment": "v0.16.0-97-g0f82f03", + "Rev": "0f82f03a2bbf14037d2331cf02f1d4157bbef6cc" }, { "ImportPath": "github.com/vmware/govmomi/vim25/debug", - "Comment": "v0.14.0-11-gb8b228c", - "Rev": "b8b228cfbad7f0a69ed90393ca9aee085d3c6ef1" + "Comment": "v0.16.0-97-g0f82f03", + "Rev": "0f82f03a2bbf14037d2331cf02f1d4157bbef6cc" }, { "ImportPath": "github.com/vmware/govmomi/vim25/methods", - "Comment": "v0.14.0-11-gb8b228c", - "Rev": "b8b228cfbad7f0a69ed90393ca9aee085d3c6ef1" + "Comment": "v0.16.0-97-g0f82f03", + "Rev": "0f82f03a2bbf14037d2331cf02f1d4157bbef6cc" }, { "ImportPath": "github.com/vmware/govmomi/vim25/mo", - "Comment": "v0.14.0-11-gb8b228c", - "Rev": "b8b228cfbad7f0a69ed90393ca9aee085d3c6ef1" + "Comment": "v0.16.0-97-g0f82f03", + "Rev": "0f82f03a2bbf14037d2331cf02f1d4157bbef6cc" }, { "ImportPath": "github.com/vmware/govmomi/vim25/progress", - "Comment": "v0.14.0-11-gb8b228c", - "Rev": "b8b228cfbad7f0a69ed90393ca9aee085d3c6ef1" + "Comment": "v0.16.0-97-g0f82f03", + "Rev": "0f82f03a2bbf14037d2331cf02f1d4157bbef6cc" }, { "ImportPath": "github.com/vmware/govmomi/vim25/soap", - "Comment": "v0.14.0-11-gb8b228c", - "Rev": 
"b8b228cfbad7f0a69ed90393ca9aee085d3c6ef1" + "Comment": "v0.16.0-97-g0f82f03", + "Rev": "0f82f03a2bbf14037d2331cf02f1d4157bbef6cc" }, { "ImportPath": "github.com/vmware/govmomi/vim25/types", - "Comment": "v0.14.0-11-gb8b228c", - "Rev": "b8b228cfbad7f0a69ed90393ca9aee085d3c6ef1" + "Comment": "v0.16.0-97-g0f82f03", + "Rev": "0f82f03a2bbf14037d2331cf02f1d4157bbef6cc" }, { "ImportPath": "github.com/vmware/govmomi/vim25/xml", - "Comment": "v0.14.0-11-gb8b228c", - "Rev": "b8b228cfbad7f0a69ed90393ca9aee085d3c6ef1" + "Comment": "v0.16.0-97-g0f82f03", + "Rev": "0f82f03a2bbf14037d2331cf02f1d4157bbef6cc" }, { "ImportPath": "github.com/vmware/photon-controller-go-sdk/SSPI", @@ -2811,6 +2907,10 @@ "ImportPath": "golang.org/x/sys/windows/registry", "Rev": "95c6576299259db960f6c5b9b69ea52422860fce" }, + { + "ImportPath": "golang.org/x/sys/windows/svc", + "Rev": "95c6576299259db960f6c5b9b69ea52422860fce" + }, { "ImportPath": "golang.org/x/text/cases", "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" @@ -2875,41 +2975,53 @@ "ImportPath": "golang.org/x/text/width", "Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01" }, + { + "ImportPath": "golang.org/x/time/rate", + "Rev": "f51c12702a4d776e4c1fa9b0fabab841babae631" + }, { "ImportPath": "golang.org/x/tools/container/intsets", "Rev": "2382e3994d48b1d22acc2c86bcad0a2aff028e32" }, { "ImportPath": "google.golang.org/api/cloudmonitoring/v2beta2", - "Rev": "654f863362977d69086620b5f72f13e911da2410" + "Rev": "7f657476956314fee258816aaf81c0ff65cf8bee" }, { "ImportPath": "google.golang.org/api/compute/v0.alpha", - "Rev": "654f863362977d69086620b5f72f13e911da2410" + "Rev": "7f657476956314fee258816aaf81c0ff65cf8bee" }, { "ImportPath": "google.golang.org/api/compute/v0.beta", - "Rev": "654f863362977d69086620b5f72f13e911da2410" + "Rev": "7f657476956314fee258816aaf81c0ff65cf8bee" }, { "ImportPath": "google.golang.org/api/compute/v1", - "Rev": "654f863362977d69086620b5f72f13e911da2410" + "Rev": "7f657476956314fee258816aaf81c0ff65cf8bee" }, { "ImportPath": "google.golang.org/api/container/v1", - "Rev": "654f863362977d69086620b5f72f13e911da2410" + "Rev": "7f657476956314fee258816aaf81c0ff65cf8bee" }, { "ImportPath": "google.golang.org/api/gensupport", - "Rev": "654f863362977d69086620b5f72f13e911da2410" + "Rev": "7f657476956314fee258816aaf81c0ff65cf8bee" }, { "ImportPath": "google.golang.org/api/googleapi", - "Rev": "654f863362977d69086620b5f72f13e911da2410" + "Rev": "7f657476956314fee258816aaf81c0ff65cf8bee" }, { "ImportPath": "google.golang.org/api/googleapi/internal/uritemplates", - "Rev": "654f863362977d69086620b5f72f13e911da2410" + "Rev": "7f657476956314fee258816aaf81c0ff65cf8bee" + }, + { + "ImportPath": "google.golang.org/api/tpu/v1alpha1", + "Rev": "7f657476956314fee258816aaf81c0ff65cf8bee" + }, + { + "ImportPath": "google.golang.org/genproto/googleapis/api/annotations", + "Rev": "09f6ed296fc66555a25fe4ce95173148778dfa85" }, { "ImportPath": "google.golang.org/genproto/googleapis/rpc/status", @@ -2917,73 +3029,93 @@ }, { "ImportPath": "google.golang.org/grpc", - "Comment": "v1.3.0", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Comment": "v1.7.5", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + }, + { + "ImportPath": "google.golang.org/grpc/balancer", + "Comment": "v1.7.5", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/codes", - "Comment": "v1.3.0", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Comment": "v1.7.5", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + }, + { + "ImportPath": 
"google.golang.org/grpc/connectivity", + "Comment": "v1.7.5", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/credentials", - "Comment": "v1.3.0", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Comment": "v1.7.5", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { - "ImportPath": "google.golang.org/grpc/grpclb/grpc_lb_v1", - "Comment": "v1.3.0", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "ImportPath": "google.golang.org/grpc/grpclb/grpc_lb_v1/messages", + "Comment": "v1.7.5", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/grpclog", - "Comment": "v1.3.0", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Comment": "v1.7.5", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + }, + { + "ImportPath": "google.golang.org/grpc/health/grpc_health_v1", + "Comment": "v1.7.5", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/internal", - "Comment": "v1.3.0", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Comment": "v1.7.5", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/keepalive", - "Comment": "v1.3.0", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Comment": "v1.7.5", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/metadata", - "Comment": "v1.3.0", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Comment": "v1.7.5", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/naming", - "Comment": "v1.3.0", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Comment": "v1.7.5", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/peer", - "Comment": "v1.3.0", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Comment": "v1.7.5", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" + }, + { + "ImportPath": "google.golang.org/grpc/resolver", + "Comment": "v1.7.5", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/stats", - "Comment": "v1.3.0", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Comment": "v1.7.5", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/status", - "Comment": "v1.3.0", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Comment": "v1.7.5", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/tap", - "Comment": "v1.3.0", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Comment": "v1.7.5", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "google.golang.org/grpc/transport", - "Comment": "v1.3.0", - "Rev": "d2e1b51f33ff8c5e4a15560ff049d200e83726c5" + "Comment": "v1.7.5", + "Rev": "5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" }, { "ImportPath": "gopkg.in/cheggaaa/pb.v1", @@ -3020,6 +3152,26 @@ "Comment": "v1.0-16-g20b71e5", "Rev": "20b71e5b60d756d3d2f80def009790325acc2b23" }, + { + "ImportPath": "gopkg.in/square/go-jose.v2", + "Comment": "v2.1.3", + "Rev": "f8f38de21b4dcd69d0413faf231983f5fd6634b1" + }, + { + "ImportPath": "gopkg.in/square/go-jose.v2/cipher", + "Comment": "v2.1.3", + "Rev": "f8f38de21b4dcd69d0413faf231983f5fd6634b1" + }, + { + "ImportPath": "gopkg.in/square/go-jose.v2/json", + "Comment": "v2.1.3", + "Rev": "f8f38de21b4dcd69d0413faf231983f5fd6634b1" + }, + { + "ImportPath": "gopkg.in/square/go-jose.v2/jwt", + "Comment": "v2.1.3", + "Rev": 
"f8f38de21b4dcd69d0413faf231983f5fd6634b1" + }, { "ImportPath": "gopkg.in/warnings.v0", "Comment": "v0.1.1", @@ -3027,7 +3179,7 @@ }, { "ImportPath": "gopkg.in/yaml.v2", - "Rev": "53feefa2559fb8dfa8d81baad31be332c97d6c77" + "Rev": "670d4cfef0544295bc27a114dbac37980d83185a" }, { "ImportPath": "k8s.io/heapster/metrics/api/v1/types", @@ -3036,3426 +3188,3511 @@ }, { "ImportPath": "k8s.io/kube-openapi/pkg/aggregator", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "50ae88d24ede7b8bad68e23c805b5d3da5c8abaf" }, { "ImportPath": "k8s.io/kube-openapi/pkg/builder", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "50ae88d24ede7b8bad68e23c805b5d3da5c8abaf" }, { "ImportPath": "k8s.io/kube-openapi/pkg/common", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "50ae88d24ede7b8bad68e23c805b5d3da5c8abaf" }, { "ImportPath": "k8s.io/kube-openapi/pkg/handler", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "50ae88d24ede7b8bad68e23c805b5d3da5c8abaf" }, { "ImportPath": "k8s.io/kube-openapi/pkg/util", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "50ae88d24ede7b8bad68e23c805b5d3da5c8abaf" }, { "ImportPath": "k8s.io/kube-openapi/pkg/util/proto", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "50ae88d24ede7b8bad68e23c805b5d3da5c8abaf" }, { "ImportPath": "k8s.io/kube-openapi/pkg/util/proto/validation", - "Rev": "39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1" + "Rev": "50ae88d24ede7b8bad68e23c805b5d3da5c8abaf" + }, + { + "ImportPath": "k8s.io/kubernetes/cmd/controller-manager/app", + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" + }, + { + "ImportPath": "k8s.io/kubernetes/cmd/controller-manager/app/options", + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/cmd/kube-apiserver/app", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/cmd/kube-apiserver/app/options", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/cmd/kube-controller-manager/app", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" + }, + { + "ImportPath": "k8s.io/kubernetes/cmd/kube-controller-manager/app/config", + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/cmd/kube-controller-manager/app/options", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/cmd/kube-proxy/app", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" + }, + { + "ImportPath": "k8s.io/kubernetes/cmd/kube-scheduler/app", + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/cmd/kubeadm/app/constants", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/cmd/kubelet/app", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + 
"Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/cmd/kubelet/app/options", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/endpoints", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/events", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/legacyscheme", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/persistentvolume", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/persistentvolumeclaim", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/pod", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/ref", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/resource", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/service", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/v1/endpoints", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/v1/node", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/v1/pod", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/v1/resource", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/api/v1/service", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/abac", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/abac/latest", - "Comment": "v1.9.0", - "Rev": 
"925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/abac/v0", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/abac/v1beta1", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/admission", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/admission/install", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/admission/v1beta1", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/admissionregistration", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/admissionregistration/install", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/admissionregistration/v1alpha1", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/admissionregistration/v1beta1", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/admissionregistration/validation", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/apps", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/apps/install", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/apps/v1", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/apps/v1beta1", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/apps/v1beta2", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/apps/validation", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + 
"Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/authentication", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/authentication/install", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/authentication/v1", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/authentication/v1beta1", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/authorization", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/authorization/install", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/authorization/v1", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/authorization/v1beta1", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/authorization/validation", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/autoscaling", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/autoscaling/install", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/autoscaling/v1", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/autoscaling/v2beta1", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/autoscaling/validation", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/batch", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/batch/install", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { 
"ImportPath": "k8s.io/kubernetes/pkg/apis/batch/v1", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/batch/v1beta1", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/batch/v2alpha1", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/batch/validation", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/certificates", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/certificates/install", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/certificates/v1beta1", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/certificates/validation", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/componentconfig", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/componentconfig/install", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/core", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/core/helper", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/core/helper/qos", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/core/install", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/core/pods", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/core/v1", - "Comment": "v1.9.0", - "Rev": 
"925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/core/v1/helper", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/core/v1/validation", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/core/validation", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/events", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/events/install", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/events/v1beta1", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/extensions", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/extensions/install", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/extensions/validation", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/imagepolicy", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/imagepolicy/install", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/networking", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/networking/install", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": 
"fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/networking/v1", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/networking/validation", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/policy", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/policy/install", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/policy/v1beta1", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/policy/validation", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/rbac", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/rbac/install", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/rbac/v1", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/rbac/v1beta1", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/rbac/validation", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/scheduling", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/scheduling/install", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/scheduling/v1alpha1", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/scheduling/validation", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/settings", - "Comment": 
"v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/settings/install", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/settings/v1alpha1", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/settings/validation", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/storage", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/storage/install", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/storage/util", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/storage/v1", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/storage/v1alpha1", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/storage/v1beta1", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/apis/storage/validation", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/auth/authorizer/abac", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/auth/nodeidentifier", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/bootstrap/api", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/capabilities", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/chaosclient", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": 
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/scheme", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/admissionregistration/internalversion", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/batch/internalversion", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/events/internalversion", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/networking/internalversion", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": 
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/rbac/internalversion", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/scheduling/internalversion", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/settings/internalversion", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/storage/internalversion", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/admissionregistration", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/admissionregistration/internalversion", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/apps", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/apps/internalversion", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/autoscaling", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/autoscaling/internalversion", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/batch", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/batch/internalversion", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": 
"k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/certificates", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/certificates/internalversion", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/extensions", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/extensions/internalversion", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/internalinterfaces", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/networking", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/networking/internalversion", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/policy", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/policy/internalversion", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/rbac", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/rbac/internalversion", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/scheduling", - "Comment": "v1.9.0", 
- "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/scheduling/internalversion", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/settings", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/settings/internalversion", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/storage", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/storage/internalversion", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/leaderelectionconfig", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/admissionregistration/internalversion", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/apps/internalversion", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/autoscaling/internalversion", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/batch/internalversion", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/certificates/internalversion", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/core/internalversion", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/extensions/internalversion", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/networking/internalversion", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": 
"fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/policy/internalversion", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/rbac/internalversion", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/scheduling/internalversion", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/settings/internalversion", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/client/listers/storage/internalversion", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/client/unversioned", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/aws", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/azure", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/azure/auth", + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/cloudstack", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/gce", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud", + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/filter", + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/gce/cloud/meta", + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/openstack", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": 
"fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/ovirt", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/photon", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/bootstrap", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/certificates", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/certificates/approver", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/certificates/cleaner", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/certificates/signer", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/clusterroleaggregation", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/cronjob", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/daemon", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/daemon/util", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/deployment", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": 
"v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/deployment/util", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/disruption", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/endpoint", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/garbagecollector", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/history", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/job", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/namespace", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/namespace/deletion", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { - "ImportPath": "k8s.io/kubernetes/pkg/controller/node", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "ImportPath": "k8s.io/kubernetes/pkg/controller/nodeipam", + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { - "ImportPath": "k8s.io/kubernetes/pkg/controller/node/ipam", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "ImportPath": "k8s.io/kubernetes/pkg/controller/nodeipam/ipam", + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { - "ImportPath": "k8s.io/kubernetes/pkg/controller/node/ipam/cidrset", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "ImportPath": "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/cidrset", + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { - "ImportPath": "k8s.io/kubernetes/pkg/controller/node/ipam/sync", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "ImportPath": "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/sync", + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { - "ImportPath": "k8s.io/kubernetes/pkg/controller/node/scheduler", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "ImportPath": "k8s.io/kubernetes/pkg/controller/nodelifecycle", + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { - "ImportPath": "k8s.io/kubernetes/pkg/controller/node/util", - 
"Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "ImportPath": "k8s.io/kubernetes/pkg/controller/nodelifecycle/scheduler", + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/podautoscaler", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/podgc", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/replicaset", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/replication", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/resourcequota", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/route", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/service", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/serviceaccount", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/statefulset", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/ttl", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/controller/util/node", + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/volume/attachdetach", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/volume/attachdetach/populator", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/volume/attachdetach/reconciler", - "Comment": 
"v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/volume/attachdetach/util", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/volume/events", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/volume/expand", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/volume/expand/cache", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { - "ImportPath": "k8s.io/kubernetes/pkg/controller/volume/expand/util", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "ImportPath": "k8s.io/kubernetes/pkg/controller/volume/persistentvolume", + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { - "ImportPath": "k8s.io/kubernetes/pkg/controller/volume/persistentvolume", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "ImportPath": "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/metrics", + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/controller/volume/pvcprotection", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/controller/volume/pvprotection", + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/credentialprovider", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/credentialprovider/aws", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/credentialprovider/azure", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/credentialprovider/gcp", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/credentialprovider/rancher", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/credentialprovider/secrets", + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": 
"k8s.io/kubernetes/pkg/features", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/fieldpath", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/generated/openapi", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubeapiserver", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubeapiserver/admission", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubeapiserver/admission/util", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubeapiserver/authenticator", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubeapiserver/authorizer", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubeapiserver/options", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubeapiserver/server", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubectl", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubectl/apps", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubectl/categories", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubectl/cmd/templates", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubectl/cmd/util", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" 
+ "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubectl/cmd/util/openapi/validation", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubectl/plugins", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubectl/resource", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubectl/scheme", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubectl/util", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubectl/util/hash", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubectl/util/slice", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubectl/util/term", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/kubectl/util/transport", + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubectl/validation", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/apis", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/apis/cri", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { - "ImportPath": "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "ImportPath": "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2", + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { - "ImportPath": "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1alpha", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "ImportPath": "k8s.io/kubernetes/pkg/kubelet/apis/deviceplugin/v1beta1", + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": 
"v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/scheme", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { - "ImportPath": "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1alpha1", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "ImportPath": "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/v1beta1", + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/validation", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/cadvisor", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/certificate", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/certificate/bootstrap", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/checkpoint", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/client", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/cm", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/cm/cpuset", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { - "ImportPath": "k8s.io/kubernetes/pkg/kubelet/cm/deviceplugin", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "ImportPath": "k8s.io/kubernetes/pkg/kubelet/cm/devicemanager", + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": 
"k8s.io/kubernetes/pkg/kubelet/cm/util", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/config", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/configmap", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/container", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/dockershim", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/dockershim/cm", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/dockershim/metrics", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/dockershim/remote", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/envvars", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/events", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/eviction", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/eviction/api", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/gpu", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/gpu/nvidia", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/images", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/kubeletconfig", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + 
"Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint/store", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/configfiles", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/status", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/codec", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/equal", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/files", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/log", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/panic", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/kuberuntime", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/kuberuntime/logs", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/leaky", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/lifecycle", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/kubelet/logs", + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/metrics", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/kubelet/metrics/collectors", + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/mountpod", - 
"Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/network", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/network/cni", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/network/dns", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/network/hairpin", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/network/hostport", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/network/kubenet", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/network/metrics", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/pleg", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/pod", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/preemption", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/prober", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/prober/results", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/qos", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/remote", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/rkt", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/secret", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": 
"fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/server", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/server/portforward", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/server/remotecommand", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/server/stats", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/server/streaming", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/stats", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/status", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/sysctl", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/types", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/util", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/util/cache", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/util/format", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/util/ioutils", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/util/queue", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/util/sliceutils", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/util/store", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/volumemanager", - "Comment": 
"v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/volumemanager/cache", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/volumemanager/populator", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/kubelet/winstats", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/master", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/master/controller/crdregistration", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/master/ports", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/master/reconcilers", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/master/tunneler", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/printers", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/printers/internalversion", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/printers/storage", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/probe", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/probe/exec", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/probe/http", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/probe/tcp", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { 
"ImportPath": "k8s.io/kubernetes/pkg/proxy", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/scheme", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/v1alpha1", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/proxy/apis/kubeproxyconfig/validation", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/proxy/config", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/proxy/healthcheck", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/proxy/iptables", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/proxy/ipvs", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/proxy/metrics", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/proxy/userspace", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/proxy/util", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/proxy/winkernel", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/proxy/winuserspace", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/quota", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/quota/evaluator/core", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/quota/generic", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + 
"Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/quota/install", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/admissionregistration/initializerconfiguration", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/admissionregistration/initializerconfiguration/storage", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/admissionregistration/mutatingwebhookconfiguration", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/admissionregistration/mutatingwebhookconfiguration/storage", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/admissionregistration/rest", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/admissionregistration/validatingwebhookconfiguration", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/admissionregistration/validatingwebhookconfiguration/storage", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/apps/controllerrevision", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/apps/controllerrevision/storage", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/registry/apps/daemonset", + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/registry/apps/daemonset/storage", + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/registry/apps/deployment", + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/registry/apps/deployment/storage", + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/registry/apps/replicaset", + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/registry/apps/replicaset/storage", + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/apps/rest", - "Comment": "v1.9.0", - "Rev": 
"925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/apps/statefulset", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/apps/statefulset/storage", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/authentication/rest", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/authentication/tokenreview", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/authorization/localsubjectaccessreview", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/authorization/rest", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/authorization/selfsubjectaccessreview", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/authorization/selfsubjectrulesreview", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/authorization/subjectaccessreview", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/authorization/util", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/autoscaling/horizontalpodautoscaler", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/autoscaling/horizontalpodautoscaler/storage", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/autoscaling/rest", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/batch/cronjob", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/batch/cronjob/storage", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": 
"fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/batch/job", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/batch/job/storage", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/batch/rest", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/cachesize", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/certificates/certificates", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/certificates/certificates/storage", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/certificates/rest", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/componentstatus", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/configmap", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/configmap/storage", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/endpoint", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/endpoint/storage", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/event", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/event/storage", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/limitrange", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/limitrange/storage", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + 
"Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/namespace", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/namespace/storage", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/node", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/node/rest", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/node/storage", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/persistentvolume", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/persistentvolume/storage", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/persistentvolumeclaim", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/persistentvolumeclaim/storage", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/pod", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/pod/rest", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/pod/storage", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/podtemplate", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/podtemplate/storage", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/rangeallocation", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/replicationcontroller", - "Comment": "v1.9.0", - "Rev": 
"925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/replicationcontroller/storage", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/resourcequota", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/resourcequota/storage", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/rest", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/secret", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/secret/storage", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/service", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/service/allocator", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/service/allocator/storage", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/service/ipallocator", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/service/ipallocator/controller", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/service/portallocator", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/service/portallocator/controller", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/service/storage", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/core/serviceaccount", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": 
"k8s.io/kubernetes/pkg/registry/core/serviceaccount/storage", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/events/rest", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/extensions/controller/storage", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/registry/extensions/daemonset", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/registry/extensions/daemonset/storage", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/registry/extensions/deployment", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/registry/extensions/deployment/storage", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/extensions/ingress", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/extensions/ingress/storage", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/extensions/podsecuritypolicy", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/extensions/podsecuritypolicy/storage", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/registry/extensions/replicaset", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" - }, - { - "ImportPath": "k8s.io/kubernetes/pkg/registry/extensions/replicaset/storage", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/extensions/rest", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/networking/networkpolicy", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/networking/networkpolicy/storage", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/networking/rest", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/policy/poddisruptionbudget", - "Comment": 
"v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/policy/poddisruptionbudget/storage", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/policy/rest", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/rbac", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/rbac/clusterrole", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/rbac/clusterrole/policybased", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/rbac/clusterrole/storage", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/rbac/clusterrolebinding", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/rbac/clusterrolebinding/policybased", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/rbac/clusterrolebinding/storage", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/rbac/reconciliation", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/rbac/rest", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/rbac/role", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/rbac/role/policybased", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/rbac/role/storage", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/rbac/rolebinding", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": 
"k8s.io/kubernetes/pkg/registry/rbac/rolebinding/policybased", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/rbac/rolebinding/storage", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/rbac/validation", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/scheduling/priorityclass", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/scheduling/priorityclass/storage", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/scheduling/rest", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/settings/podpreset", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/settings/podpreset/storage", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/settings/rest", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/storage/rest", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/storage/storageclass", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/storage/storageclass/storage", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/storage/volumeattachment", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/registry/storage/volumeattachment/storage", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/routes", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/scheduler", + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" + }, + { + "ImportPath": 
"k8s.io/kubernetes/pkg/scheduler/algorithm", + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates", + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities", + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util", + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/scheduler/algorithmprovider", + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/scheduler/algorithmprovider/defaults", + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/scheduler/api", + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/scheduler/api/latest", + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/scheduler/api/v1", + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/scheduler/api/validation", + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/scheduler/core", + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/scheduler/factory", + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/scheduler/metrics", + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/scheduler/schedulercache", + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/scheduler/util", + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/scheduler/volumebinder", + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/security/apparmor", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/security/podsecuritypolicy", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/security/podsecuritypolicy/apparmor", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/security/podsecuritypolicy/capabilities", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/security/podsecuritypolicy/group", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": 
"k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/security/podsecuritypolicy/selinux", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/security/podsecuritypolicy/sysctl", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/security/podsecuritypolicy/user", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/security/podsecuritypolicy/util", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/securitycontext", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/serviceaccount", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/ssh", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/async", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/bandwidth", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/config", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/configz", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/util/conntrack", + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/dbus", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/ebtables", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/env", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/file", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { 
"ImportPath": "k8s.io/kubernetes/pkg/util/filesystem", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/util/flag", + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/flock", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/goroutinemap", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/hash", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/interrupt", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/io", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/ipconfig", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/ipset", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/iptables", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/ipvs", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/keymutex", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/labels", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/limitwriter", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/maps", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/metrics", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/mount", - "Comment": 
"v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/util/net", + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/net/sets", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/netsh", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/node", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/nsenter", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/oom", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/parsers", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/pointer", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/procfs", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/reflector/prometheus", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/removeall", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/resizefs", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/resourcecontainer", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/rlimit", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/selinux", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/slice", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/strings", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": 
"v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/sysctl", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/system", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/tail", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/taints", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/term", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/tolerations", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/version", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/util/workqueue/prometheus", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/version", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/version/verflag", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/aws_ebs", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/azure_dd", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/azure_file", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/cephfs", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/cinder", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/configmap", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": 
"v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/csi", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/downwardapi", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/empty_dir", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/fc", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/flexvolume", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/flocker", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/gce_pd", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/git_repo", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/glusterfs", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/host_path", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/iscsi", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/local", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/nfs", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/photon_pd", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/portworx", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/projected", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/quobyte", - "Comment": "v1.9.0", - "Rev": 
"925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/rbd", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/scaleio", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/secret", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/storageos", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/util", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/volume/util/fs", + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/util/operationexecutor", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" + }, + { + "ImportPath": "k8s.io/kubernetes/pkg/volume/util/recyclerclient", + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/util/types", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { - "ImportPath": "k8s.io/kubernetes/pkg/volume/util/volumehelper", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "ImportPath": "k8s.io/kubernetes/pkg/volume/util/volumepathhandler", + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/validation", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/pkg/volume/vsphere_volume", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { - "ImportPath": "k8s.io/kubernetes/plugin/cmd/kube-scheduler/app", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "ImportPath": "k8s.io/kubernetes/pkg/windows/service", + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/admit", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/alwayspullimages", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": 
"fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/antiaffinity", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/defaulttolerationseconds", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/deny", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/eventratelimit", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit/install", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit/v1alpha1", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/eventratelimit/apis/eventratelimit/validation", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/exec", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/extendedresourcetoleration", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/gc", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/imagepolicy", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/initialresources", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/limitranger", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/namespace/autoprovision", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { 
"ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/namespace/exists", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/noderestriction", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/persistentvolume/label", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/persistentvolume/resize", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" - }, - { - "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/persistentvolumeclaim/pvcprotection", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/podnodeselector", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/podpreset", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/install", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/v1alpha1", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction/validation", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/priority", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/resourcequota", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": 
"fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/install", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/v1alpha1", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/resourcequota/apis/resourcequota/validation", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/security/podsecuritypolicy", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/securitycontext/scdeny", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/serviceaccount", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" + }, + { + "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/storage/storageclass/setdefault", + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { - "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/storageclass/setdefault", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "ImportPath": "k8s.io/kubernetes/plugin/pkg/admission/storage/storageobjectinuseprotection", + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/bootstrap", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/auth/authorizer/node", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" - }, - { - "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" - }, - { - "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" - }, - { - "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" - }, - { - "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" - }, - { - "ImportPath": 
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" - }, - { - "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" - }, - { - "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider/defaults", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" - }, - { - "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler/api", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" - }, - { - "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler/api/latest", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" - }, - { - "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler/api/v1", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" - }, - { - "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler/api/validation", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" - }, - { - "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler/core", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" - }, - { - "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler/factory", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" - }, - { - "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler/metrics", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" - }, - { - "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" - }, - { - "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler/util", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" - }, - { - "ImportPath": "k8s.io/kubernetes/plugin/pkg/scheduler/volumebinder", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/third_party/forked/golang/expansion", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/third_party/forked/gonum/graph", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/third_party/forked/gonum/graph/internal/linear", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/third_party/forked/gonum/graph/simple", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/kubernetes/third_party/forked/gonum/graph/traverse", - "Comment": "v1.9.0", - "Rev": "925c127ec6b946659ad0fd596fa959be43f0cc05" + "Comment": "v1.10.0", + "Rev": "fc32d2f3698e36b93322a3465f63a14e9f0eaead" }, { "ImportPath": "k8s.io/utils/clock", diff --git a/Makefile b/Makefile index 886eeceb61a7..c79f94fa145a 100755 --- a/Makefile +++ b/Makefile @@ -14,7 +14,7 @@ # Bump these on release VERSION_MAJOR ?= 0 -VERSION_MINOR ?= 25 +VERSION_MINOR ?= 26 VERSION_BUILD ?= 0 VERSION ?= v$(VERSION_MAJOR).$(VERSION_MINOR).$(VERSION_BUILD) DEB_VERSION ?= 
$(VERSION_MAJOR).$(VERSION_MINOR)-$(VERSION_BUILD) @@ -26,7 +26,7 @@ HYPERKIT_BUILD_IMAGE ?= karalabe/xgo-1.8.3 BUILD_IMAGE ?= k8s.gcr.io/kube-cross:v1.9.1-1 ISO_BUILD_IMAGE ?= $(REGISTRY)/buildroot-image -ISO_VERSION ?= v0.25.1 +ISO_VERSION ?= v0.26.0 ISO_BUCKET ?= minikube/iso KERNEL_VERSION ?= 4.9.64 @@ -52,18 +52,21 @@ STORAGE_PROVISIONER_TAG := v1.8.1 # Set the version information for the Kubernetes servers, and build localkube statically K8S_VERSION_LDFLAGS := $(shell $(PYTHON) hack/get_k8s_version.py 2>&1) MINIKUBE_LDFLAGS := -X k8s.io/minikube/pkg/version.version=$(VERSION) -X k8s.io/minikube/pkg/version.isoVersion=$(ISO_VERSION) -X k8s.io/minikube/pkg/version.isoPath=$(ISO_BUCKET) -LOCALKUBE_LDFLAGS := "$(K8S_VERSION_LDFLAGS) $(MINIKUBE_LDFLAGS) -s -w -extldflags '-static'" +LOCALKUBE_LDFLAGS := "$(K8S_VERSION_LDFLAGS) $(MINIKUBE_LDFLAGS) -s -w" -LOCALKUBEFILES := GOPATH=$(GOPATH) go list -f '{{join .Deps "\n"}}' ./cmd/localkube/ | grep k8s.io | GOPATH=$(GOPATH) xargs go list -f '{{ range $$file := .GoFiles }} {{$$.Dir}}/{{$$file}}{{"\n"}}{{end}}' -MINIKUBEFILES := GOPATH=$(GOPATH) go list -f '{{join .Deps "\n"}}' ./cmd/minikube/ | grep k8s.io | GOPATH=$(GOPATH) xargs go list -f '{{ range $$file := .GoFiles }} {{$$.Dir}}/{{$$file}}{{"\n"}}{{end}}' -HYPERKIT_FILES := GOPATH=$(GOPATH) go list -f '{{join .Deps "\n"}}' k8s.io/minikube/cmd/drivers/hyperkit | grep k8s.io | GOPATH=$(GOPATH) xargs go list -f '{{ range $$file := .GoFiles }} {{$$.Dir}}/{{$$file}}{{"\n"}}{{end}}' -STORAGE_PROVISIONER_FILES := GOPATH=$(GOPATH) go list -f '{{join .Deps "\n"}}' k8s.io/minikube/cmd/storage-provisioner | grep k8s.io | GOPATH=$(GOPATH) xargs go list -f '{{ range $$file := .GoFiles }} {{$$.Dir}}/{{$$file}}{{"\n"}}{{end}}' -MINIKUBE_TEST_FILES := go list -f '{{ if .TestGoFiles }} {{.ImportPath}} {{end}}' ./... | grep k8s.io | GOPATH=$(GOPATH) xargs go list -f '{{ range $$file := .GoFiles }} {{$$.Dir}}/{{$$file}}{{"\n"}}{{end}}' +MAKEDEPEND := GOPATH=$(GOPATH) ./makedepend.sh -KVM_DRIVER_FILES := $(shell go list -f '{{join .Deps "\n"}}' ./cmd/drivers/kvm/ | grep k8s.io | xargs go list -f '{{ range $$file := .GoFiles }} {{$$.Dir}}/{{$$file}}{{"\n"}}{{end}}') +LOCALKUBEFILES := ./cmd/localkube/ +MINIKUBEFILES := ./cmd/minikube/ +HYPERKIT_FILES := ./cmd/drivers/hyperkit +STORAGE_PROVISIONER_FILES := ./cmd/storage-provisioner +KVM_DRIVER_FILES := ./cmd/drivers/kvm/ + +MINIKUBE_TEST_FILES := ./... 
MINIKUBE_BUILD_TAGS := container_image_ostree_stub containers_image_openpgp MINIKUBE_INTEGRATION_BUILD_TAGS := integration $(MINIKUBE_BUILD_TAGS) +SOURCE_DIRS = cmd pkg test # $(call DOCKER, image, command) define DOCKER @@ -91,23 +94,34 @@ endif ifeq ($(GOOS),windows) IS_EXE = ".exe" endif -out/minikube$(IS_EXE): gopath out/minikube-$(GOOS)-$(GOARCH)$(IS_EXE) - cp $(BUILD_DIR)/minikube-$(GOOS)-$(GOARCH) $(BUILD_DIR)/minikube$(IS_EXE) +out/minikube$(IS_EXE): out/minikube-$(GOOS)-$(GOARCH)$(IS_EXE) + cp $< $@ + +out/localkube.d: + GOOS=linux GOARCH=amd64 $(MAKEDEPEND) out/localkube $(ORG) $(LOCALKUBEFILES) $^ > $@ -out/localkube: $(shell $(LOCALKUBEFILES)) +-include out/localkube.d +out/localkube: ifeq ($(LOCALKUBE_BUILD_IN_DOCKER),y) $(call DOCKER,$(BUILD_IMAGE),/usr/bin/make $@) else - CGO_ENABLED=1 go build -tags static_build -ldflags=$(LOCALKUBE_LDFLAGS) -o $(BUILD_DIR)/localkube ./cmd/localkube + CGO_ENABLED=1 go build -ldflags=$(LOCALKUBE_LDFLAGS) -o $(BUILD_DIR)/localkube ./cmd/localkube endif out/minikube-windows-amd64.exe: out/minikube-windows-amd64 cp out/minikube-windows-amd64 out/minikube-windows-amd64.exe -out/minikube-%-amd64: pkg/minikube/assets/assets.go $(shell $(MINIKUBEFILES)) +out/minikube.d: pkg/minikube/assets/assets.go + $(MAKEDEPEND) out/minikube-$(GOOS)-$(GOARCH) $(ORG) $(MINIKUBEFILES) $^ > $@ + +-include out/minikube.d +out/minikube-%-amd64: pkg/minikube/assets/assets.go ifeq ($(MINIKUBE_BUILD_IN_DOCKER),y) $(call DOCKER,$(BUILD_IMAGE),/usr/bin/make $@) else +ifneq ($(GOPATH)/src/$(REPOPATH),$(PWD)) + $(warning Warning: Building minikube outside the GOPATH, should be $(GOPATH)/src/$(REPOPATH) but is $(PWD)) +endif GOOS=$* go build -tags "$(MINIKUBE_BUILD_TAGS)" -ldflags="$(MINIKUBE_LDFLAGS) $(K8S_VERSION_LDFLAGS)" -a -o $@ k8s.io/minikube/cmd/minikube endif @@ -151,6 +165,9 @@ test-iso: test-pkg/%: go test -v -test.timeout=30m $(REPOPATH)/$* --tags="$(MINIKUBE_BUILD_TAGS)" +.PHONY: depend +depend: out/localkube.d out/minikube.d out/test.d out/docker-machine-driver-hyperkit.d out/storage-provisioner.d out/docker-machine-driver-kvm2.d + .PHONY: all all: cross drivers e2e-cross images out/localkube @@ -176,14 +193,12 @@ integration-versioned: out/minikube go test -v -test.timeout=30m $(REPOPATH)/test/integration --tags="$(MINIKUBE_INTEGRATION_BUILD_TAGS) versioned" $(TEST_ARGS) .PHONY: test -test: $(shell $(MINIKUBE_TEST_FILES)) pkg/minikube/assets/assets.go - ./test.sh +out/test.d: pkg/minikube/assets/assets.go + $(MAKEDEPEND) -t test $(ORG) $(MINIKUBE_TEST_FILES) $^ > $@ -.PHONY: gopath -gopath: -ifneq ($(GOPATH)/src/$(REPOPATH),$(PWD)) - $(warning Warning: Building minikube outside the GOPATH, should be $(GOPATH)/src/$(REPOPATH) but is $(PWD)) -endif +-include out/test.d +test: + ./test.sh pkg/minikube/assets/assets.go: $(GOPATH)/bin/go-bindata $(shell find deploy/addons -type f) $(GOPATH)/bin/go-bindata -nomemcopy -o pkg/minikube/assets/assets.go -pkg assets deploy/addons/... 
@@ -213,8 +228,12 @@ clean: .PHONY: gendocs gendocs: out/docs/minikube.md +.PHONY: fmt +fmt: + @gofmt -l -s -w $(SOURCE_DIRS) + out/docs/minikube.md: $(shell find cmd) $(shell find pkg/minikube/constants) pkg/minikube/assets/assets.go - cd $(GOPATH)/src/$(REPOPATH) && go run -ldflags="$(K8S_VERSION_LDFLAGS) $(MINIKUBE_LDFLAGS)" -tags gendocs hack/gen_help_text.go + cd $(GOPATH)/src/$(REPOPATH) && go run -ldflags="$(K8S_VERSION_LDFLAGS) $(MINIKUBE_LDFLAGS)" hack/gen_help_text.go out/minikube_$(DEB_VERSION).deb: out/minikube-linux-amd64 cp -r installers/linux/deb/minikube_deb_template out/minikube_$(DEB_VERSION) @@ -250,7 +269,11 @@ out/minikube-installer.exe: out/minikube-windows-amd64.exe mv out/windows_tmp/minikube-installer.exe out/minikube-installer.exe rm -rf out/windows_tmp -out/docker-machine-driver-hyperkit: $(shell $(HYPERKIT_FILES)) +out/docker-machine-driver-hyperkit.d: + $(MAKEDEPEND) out/docker-machine-driver-hyperkit $(ORG) $(HYPERKIT_FILES) $^ > $@ + +-include out/docker-machine-driver-hyperkit.d +out/docker-machine-driver-hyperkit: ifeq ($(MINIKUBE_BUILD_IN_DOCKER),y) $(call DOCKER,$(HYPERKIT_BUILD_IMAGE),CC=o64-clang CXX=o64-clang++ /usr/bin/make $@) else @@ -303,7 +326,11 @@ $(ISO_BUILD_IMAGE): deploy/iso/minikube-iso/Dockerfile @echo "" @echo "$(@) successfully built" -out/storage-provisioner: $(shell $(STORAGE_PROVISIONER_FILES)) +out/storage-provisioner.d: + $(MAKEDEPEND) out/storage-provisioner $(ORG) $(STORAGE_PROVISIONER_FILES) $^ > $@ + +-include out/storage-provisioner.d +out/storage-provisioner: GOOS=linux go build -o $(BUILD_DIR)/storage-provisioner -ldflags=$(LOCALKUBE_LDFLAGS) cmd/storage-provisioner/main.go .PHONY: storage-provisioner-image @@ -319,7 +346,11 @@ release-iso: minikube_iso checksum gsutil cp out/minikube.iso gs://$(ISO_BUCKET)/minikube-$(ISO_VERSION).iso gsutil cp out/minikube.iso.sha256 gs://$(ISO_BUCKET)/minikube-$(ISO_VERSION).iso.sha256 -out/docker-machine-driver-kvm2: $(KVM_DRIVER_FILES) +out/docker-machine-driver-kvm2.d: + $(MAKEDEPEND) out/docker-machine-driver-kvm2 $(ORG) $(KVM_DRIVER_FILES) $^ > $@ + +-include out/docker-machine-driver-kvm2.d +out/docker-machine-driver-kvm2: go build \ -installsuffix "static" \ -ldflags "-X k8s.io/minikube/pkg/drivers/kvm/version.VERSION=$(VERSION)" \ diff --git a/README.md b/README.md index 29028d0a4c6c..07135e349f17 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,7 @@ [GoReport Status]: https://goreportcard.com/report/github.com/kubernetes/minikube [GoReport Widget]: https://goreportcard.com/badge/github.com/kubernetes/minikube -[CodeCovResult]: https://codecov.io/gh/kubernetes/minikube +[CodeCovResult]: https://codecov.io/gh/kubernetes/minikube [CodeCovWidget]: https://codecov.io/gh/kubernetes/minikube/branch/master/graph/badge.svg @@ -33,7 +33,7 @@ curl -Lo minikube https://storage.googleapis.com/minikube/releases/latest/miniku ### Windows Download the [minikube-windows-amd64.exe](https://storage.googleapis.com/minikube/releases/latest/minikube-windows-amd64.exe) file, rename it to `minikube.exe` and add it to your path. 
-### Linux Continuous Integration with VM Support +### Linux Continuous Integration without VM Support Example with kubectl installation: ```shell curl -Lo minikube https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64 && chmod +x minikube @@ -63,7 +63,9 @@ done ### Other Ways to Install -* [Linux] [Arch Linux AUR](https://aur.archlinux.org/packages/minikube/) +* [Linux] + * [Arch Linux AUR](https://aur.archlinux.org/packages/minikube/) + * [Fedora/CentOS/Red Hat COPR](https://copr.fedorainfracloud.org/coprs/antonpatsev/minikube-rpm/) * [Windows] [Chocolatey](https://chocolatey.org/packages/Minikube) ### Minikube Version Management @@ -78,7 +80,7 @@ We also released a Debian package and Windows installer on our [releases page](h * [Hyperkit driver](https://github.com/kubernetes/minikube/blob/master/docs/drivers.md#hyperkit-driver), [xhyve driver](https://github.com/kubernetes/minikube/blob/master/docs/drivers.md#xhyve-driver), [VirtualBox](https://www.virtualbox.org/wiki/Downloads), or [VMware Fusion](https://www.vmware.com/products/fusion) * Linux * [VirtualBox](https://www.virtualbox.org/wiki/Downloads) or [KVM](https://github.com/kubernetes/minikube/blob/master/docs/drivers.md#kvm-driver) - * **NOTE:** Minikube also supports a `--vm-driver=none` option that runs the Kubernetes components on the host and not in a VM. Docker is required to use this driver but no hypervisor. + * **NOTE:** Minikube also supports a `--vm-driver=none` option that runs the Kubernetes components on the host and not in a VM. Docker is required to use this driver but no hypervisor. If you use `--vm-driver=none`, be sure to specify a [bridge network](https://docs.docker.com/network/bridge/#configure-the-default-bridge-network) for Docker. Otherwise the bridge network's address might change between network restarts, causing loss of connectivity to your cluster. * Windows * [VirtualBox](https://www.virtualbox.org/wiki/Downloads) or [Hyper-V](https://github.com/kubernetes/minikube/blob/master/docs/drivers.md#hyperV-driver) * VT-x/AMD-v virtualization must be enabled in BIOS @@ -145,7 +147,7 @@ Machine stopped. ### kubectl -The `minikube start` command creates a "[kubectl context](https://kubernetes.io/docs/user-guide/kubectl/v1.7/#-em-set-context-em-)" called "minikube". +The `minikube start` command creates a "[kubectl context](https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#-em-set-context-em-)" called "minikube". This context contains the configuration to communicate with your Minikube cluster. Minikube sets this context to default automatically, but if you need to switch back to it in the future, run: @@ -170,7 +172,7 @@ minikube service [-n NAMESPACE] [--url] NAME ## Design -Minikube uses [libmachine](https://github.com/docker/machine/tree/master/libmachine) for provisioning VMs, and [localkube](https://github.com/kubernetes/minikube/tree/master/pkg/localkube) (originally written and donated to this project by [Redspread](https://redspread.com/)) for running the cluster. +Minikube uses [libmachine](https://github.com/docker/machine/tree/master/libmachine) for provisioning VMs, and [localkube](https://github.com/kubernetes/minikube/tree/master/pkg/localkube) (originally written and donated to this project by Redspread) for running the cluster. For more information about Minikube, see the [proposal](https://github.com/kubernetes/community/blob/master/contributors/design-proposals/cluster-lifecycle/local-cluster-ux.md).
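As an illustrative aside on the `--vm-driver=none` bridge-network note added in the README hunk above: one way to keep Docker's default bridge address stable across network restarts is to pin it in the daemon configuration. A minimal sketch, assuming the standard `/etc/docker/daemon.json` location and an example subnet (neither is specified by this change):

```shell
# Pin the address of Docker's default bridge (docker0) so it does not
# change when the daemon or host networking restarts.
# The subnet below is only an example; pick one that is free on your host.
cat <<'EOF' | sudo tee /etc/docker/daemon.json
{
  "bip": "172.26.0.1/16"
}
EOF
sudo systemctl restart docker
```

With `bip` pinned, a `--vm-driver=none` cluster keeps a stable Docker network address, avoiding the connectivity loss the note warns about.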
diff --git a/cmd/localkube/cmd/start.go b/cmd/localkube/cmd/start.go index 96ec0e7b5777..69bf1228334d 100644 --- a/cmd/localkube/cmd/start.go +++ b/cmd/localkube/cmd/start.go @@ -46,7 +46,10 @@ func StartLocalkube() { os.Exit(0) } - // TODO: Require root + if os.Geteuid() != 0 { + fmt.Println("localkube should run as root!") + os.Exit(1) + } SetupServer(Server) Server.StartAll() diff --git a/cmd/minikube/cmd/completion.go b/cmd/minikube/cmd/completion.go index 4b2bd715c632..e0fa198686a7 100644 --- a/cmd/minikube/cmd/completion.go +++ b/cmd/minikube/cmd/completion.go @@ -41,7 +41,7 @@ const longDescription = ` $ apt-get install bash-completion $ source /etc/bash-completion $ source <(minikube completion bash) # for bash users - $ source <(minikube completion bash) # for zsh users + $ source <(minikube completion zsh) # for zsh users Additionally, you may want to output the completion to a file and source in your .bashrc diff --git a/cmd/minikube/cmd/config/config.go b/cmd/minikube/cmd/config/config.go index 067a4bfe72a1..42fb394f9d7a 100644 --- a/cmd/minikube/cmd/config/config.go +++ b/cmd/minikube/cmd/config/config.go @@ -124,6 +124,10 @@ var settings = []Setting{ name: Bootstrapper, set: SetString, //TODO(r2d4): more validation here? }, + { + name: config.ShowBootstrapperDeprecationNotification, + set: SetBool, + }, { name: "dashboard", set: SetBool, @@ -196,6 +200,18 @@ var settings = []Setting{ validations: []setFn{IsValidAddon}, callbacks: []setFn{EnableOrDisableDefaultStorageClass}, }, + { + name: "storage-provisioner", + set: SetBool, + validations: []setFn{IsValidAddon}, + callbacks: []setFn{EnableOrDisableAddon}, + }, + { + name: "metrics-server", + set: SetBool, + validations: []setFn{IsValidAddon}, + callbacks: []setFn{EnableOrDisableAddon}, + }, { name: "hyperv-virtual-switch", set: SetString, diff --git a/cmd/minikube/cmd/config/configure.go b/cmd/minikube/cmd/config/configure.go index d681bd8c2869..9963cb76faf6 100644 --- a/cmd/minikube/cmd/config/configure.go +++ b/cmd/minikube/cmd/config/configure.go @@ -62,7 +62,7 @@ var addonsConfigureCmd = &cobra.Command{ awsAccessKey = AskForStaticValue("-- Enter AWS Secret Access Key: ") awsSessionToken = AskForStaticValueOptional("-- (Optional) Enter AWS Session Token: ") awsRegion = AskForStaticValue("-- Enter AWS Region: ") - awsAccount = AskForStaticValue("-- Enter 12 digit AWS Account ID: ") + awsAccount = AskForStaticValue("-- Enter 12 digit AWS Account ID (Comma seperated list): ") awsRole = AskForStaticValueOptional("-- (Optional) Enter ARN of AWS role to assume: ") } diff --git a/cmd/minikube/cmd/logs.go b/cmd/minikube/cmd/logs.go index 494b9329bc56..0114d8d59aae 100644 --- a/cmd/minikube/cmd/logs.go +++ b/cmd/minikube/cmd/logs.go @@ -50,12 +50,11 @@ var logsCmd = &cobra.Command{ glog.Exitf("Error getting cluster bootstrapper: %s", err) } - s, err := clusterBootstrapper.GetClusterLogs(follow) + err = clusterBootstrapper.GetClusterLogsTo(follow, os.Stdout) if err != nil { log.Println("Error getting machine logs:", err) cmdUtil.MaybeReportErrorAndExit(err) } - fmt.Fprintln(os.Stdout, s) }, } diff --git a/cmd/minikube/cmd/node/add.go b/cmd/minikube/cmd/node/add.go index f8c95c6afba0..1c9740dd56eb 100644 --- a/cmd/minikube/cmd/node/add.go +++ b/cmd/minikube/cmd/node/add.go @@ -10,8 +10,7 @@ import ( "k8s.io/minikube/cmd/minikube/profile" cmdutil "k8s.io/minikube/cmd/util" - "k8s.io/minikube/pkg/minikube" - cfg "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/config" ) func NewCmdAdd() *cobra.Command { @@ -25,7 
+24,7 @@ func NewCmdAdd() *cobra.Command { func add(cmd *cobra.Command, args []string) { // TODO Make clusterName into `--cluster=` flag - clusterName := viper.GetString(cfg.MachineProfile) + clusterName := viper.GetString(config.MachineProfile) nodeName := "" if len(args) > 0 { @@ -42,7 +41,7 @@ func add(cmd *cobra.Command, args []string) { nodeName = fmt.Sprintf("node-%d", len(cfg.Nodes)+1) } - node := minikube.NodeConfig{ + node := config.NodeConfig{ Name: nodeName, } diff --git a/cmd/minikube/cmd/node/node.go b/cmd/minikube/cmd/node/node.go index 2459c6a8bac0..a17917f1f808 100644 --- a/cmd/minikube/cmd/node/node.go +++ b/cmd/minikube/cmd/node/node.go @@ -8,7 +8,7 @@ import ( "github.com/spf13/cobra" "k8s.io/minikube/cmd/minikube/profile" - "k8s.io/minikube/pkg/minikube" + "k8s.io/minikube/pkg/minikube/config" ) const internalErrorCode = -1 @@ -30,14 +30,14 @@ func NewCmdNode() *cobra.Command { return cmd } -func getMachineName(clusterName string, node minikube.NodeConfig) string { +func getMachineName(clusterName string, node config.NodeConfig) string { return fmt.Sprintf("%s-%s", clusterName, node.Name) } -func getNode(clusterName, nodeName string) (minikube.NodeConfig, error) { +func getNode(clusterName, nodeName string) (config.NodeConfig, error) { cfg, err := profile.LoadConfigFromFile(clusterName) if err != nil && !os.IsNotExist(err) { - return minikube.NodeConfig{}, errors.Errorf("Error loading profile config: %s", err) + return config.NodeConfig{}, errors.Errorf("Error loading profile config: %s", err) } for _, node := range cfg.Nodes { @@ -46,5 +46,5 @@ func getNode(clusterName, nodeName string) (minikube.NodeConfig, error) { } } - return minikube.NodeConfig{}, errors.Errorf("Node not found in cluster. cluster: %s node: %s", clusterName, nodeName) + return config.NodeConfig{}, errors.Errorf("Node not found in cluster. cluster: %s node: %s", clusterName, nodeName) } diff --git a/cmd/minikube/cmd/root.go b/cmd/minikube/cmd/root.go old mode 100755 new mode 100644 index e363971b28d3..9cd67c9faf78 --- a/cmd/minikube/cmd/root.go +++ b/cmd/minikube/cmd/root.go @@ -166,6 +166,7 @@ func setupViper() { viper.SetDefault(config.WantKubectlDownloadMsg, true) viper.SetDefault(config.WantNoneDriverWarning, true) viper.SetDefault(config.ShowDriverDeprecationNotification, true) + viper.SetDefault(config.ShowBootstrapperDeprecationNotification, true) setFlagsUsingViper() } @@ -175,6 +176,12 @@ func GetClusterBootstrapper(api libmachine.API, bootstrapperName string) (bootst var err error switch bootstrapperName { case bootstrapper.BootstrapperTypeLocalkube: + if viper.GetBool(config.ShowBootstrapperDeprecationNotification) { + fmt.Fprintln(os.Stderr, `WARNING: The localkube bootstrapper is now deprecated and support for it +will be removed in a future release. Please consider switching to the kubeadm bootstrapper, which +is intended to replace the localkube bootstrapper. 
To disable this message, run +[minikube config set ShowBootstrapperDeprecationNotification false]`) + } b, err = localkube.NewLocalkubeBootstrapper(api) if err != nil { return nil, errors.Wrap(err, "getting localkube bootstrapper") diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index 006e836d0904..a797d88d4755 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -128,7 +128,7 @@ func runStart(cmd *cobra.Command, args []string) { } // NOTE Create machine config - config := cluster.MachineConfig{ + config := cfg.MachineConfig{ MachineName: cfg.GetMachineName(), MinikubeISO: viper.GetString(isoURL), Memory: viper.GetInt(memory), @@ -213,7 +213,7 @@ func runStart(cmd *cobra.Command, args []string) { glog.Exitf("Error generating bootstrap token: ", err) } - kubernetesConfig := bootstrapper.KubernetesConfig{ + kubernetesConfig := cfg.KubernetesConfig{ KubernetesVersion: selectedKubernetesVersion, NodeIP: ip, NodeName: cfg.GetMachineName(), @@ -237,7 +237,7 @@ func runStart(cmd *cobra.Command, args []string) { } // Write profile cluster configuration to file - clusterConfig := cluster.Config{ + clusterConfig := cfg.Config{ ClusterName: profileName, MachineConfig: config, KubernetesConfig: kubernetesConfig, @@ -427,7 +427,6 @@ func genBootstrapToken() (string, error) { if err != nil { return "", nil } - second, err := gostrgen.RandGen(16, gostrgen.Lower|gostrgen.Digit, "", "") if err != nil { return "", nil diff --git a/cmd/minikube/cmd/start_node.go b/cmd/minikube/cmd/start_node.go index 2a1c03473bcb..547673597049 100644 --- a/cmd/minikube/cmd/start_node.go +++ b/cmd/minikube/cmd/start_node.go @@ -6,9 +6,10 @@ import ( "github.com/docker/machine/libmachine" "github.com/golang/glog" "k8s.io/minikube/pkg/minikube/cluster" + "k8s.io/minikube/pkg/minikube/config" ) -func startNodes(api libmachine.API, masterIP string, baseConfig cluster.Config, count int) error { +func startNodes(api libmachine.API, masterIP string, baseConfig config.Config, count int) error { for i := 0; i < count; i++ { name := fmt.Sprintf("%s-%d", baseConfig.MachineConfig.MachineName, i+1) newConfig := newConfig(baseConfig.MachineConfig, name) @@ -22,7 +23,7 @@ func startNodes(api libmachine.API, masterIP string, baseConfig cluster.Config, return nil } -func newConfig(baseConfig cluster.MachineConfig, machineName string) cluster.MachineConfig { +func newConfig(baseConfig config.MachineConfig, machineName string) config.MachineConfig { baseConfig.MachineName = machineName return baseConfig } diff --git a/cmd/minikube/cmd/status.go b/cmd/minikube/cmd/status.go old mode 100755 new mode 100644 diff --git a/cmd/minikube/main.go b/cmd/minikube/main.go index f283afa018df..df6bd86b797c 100644 --- a/cmd/minikube/main.go +++ b/cmd/minikube/main.go @@ -27,12 +27,12 @@ import ( _ "k8s.io/minikube/pkg/provision" ) -const minikubeEnvPrefix = "MINIKUBE_ENABLE_PROFILING" +const minikubeEnableProfile = "MINIKUBE_ENABLE_PROFILING" func main() { defer glog.Flush() - if os.Getenv(minikubeEnvPrefix) == "1" { + if os.Getenv(minikubeEnableProfile) == "1" { defer profile.Start(profile.TraceProfile).Stop() } if os.Getenv(constants.IsMinikubeChildProcess) == "" { diff --git a/cmd/minikube/profile/profile.go b/cmd/minikube/profile/profile.go index 6831800dda44..84ba9e495905 100644 --- a/cmd/minikube/profile/profile.go +++ b/cmd/minikube/profile/profile.go @@ -9,7 +9,6 @@ import ( "github.com/pkg/errors" "github.com/spf13/viper" - "k8s.io/minikube/pkg/minikube/cluster" cfg "k8s.io/minikube/pkg/minikube/config" 
"k8s.io/minikube/pkg/minikube/constants" pkgutil "k8s.io/minikube/pkg/util" @@ -17,7 +16,7 @@ import ( // SaveConfig saves profile cluster configuration in // $MINIKUBE_HOME/profiles//config.json -func SaveConfig(profile string, clusterConfig cluster.Config) error { +func SaveConfig(profile string, clusterConfig cfg.Config) error { data, err := json.MarshalIndent(clusterConfig, "", " ") if err != nil { return err @@ -65,8 +64,8 @@ func saveConfigToFile(data []byte, file string) error { return nil } -func LoadConfigFromFile(profile string) (cluster.Config, error) { - var cc cluster.Config +func LoadConfigFromFile(profile string) (cfg.Config, error) { + var cc cfg.Config if profile == "" { return cc, fmt.Errorf("Profile name cannot be empty.") @@ -92,14 +91,14 @@ func LoadConfigFromFile(profile string) (cluster.Config, error) { return cc, nil } -func LoadClusterConfigs() ([]cluster.Config, error) { +func LoadClusterConfigs() ([]cfg.Config, error) { files := constants.GetProfileFiles() - configs := make([]cluster.Config, len(files)) + configs := make([]cfg.Config, len(files)) for i, f := range files { c, err := loadConfigFromFile(f) if err != nil { - return []cluster.Config{}, errors.Wrapf(err, "Error loading config from file: %s", f) + return []cfg.Config{}, errors.Wrapf(err, "Error loading config from file: %s", f) } configs[i] = c } @@ -107,8 +106,8 @@ func LoadClusterConfigs() ([]cluster.Config, error) { return configs, nil } -func loadConfigFromFile(file string) (cluster.Config, error) { - var c cluster.Config +func loadConfigFromFile(file string) (cfg.Config, error) { + var c cfg.Config reader, err := os.Open(file) defer reader.Close() diff --git a/deploy/addons/addon-manager.yaml b/deploy/addons/addon-manager.yaml index ea2314c64259..33d2017bb054 100644 --- a/deploy/addons/addon-manager.yaml +++ b/deploy/addons/addon-manager.yaml @@ -19,13 +19,13 @@ metadata: namespace: kube-system labels: component: kube-addon-manager - version: v6.5 + version: v8.6 kubernetes.io/minikube-addons: addon-manager spec: hostNetwork: true containers: - name: kube-addon-manager - image: gcr.io/google-containers/kube-addon-manager:v6.5 + image: k8s.gcr.io/kube-addon-manager:v8.6 env: - name: KUBECONFIG value: /var/lib/localkube/kubeconfig diff --git a/deploy/addons/coredns/coreDNS-configmap.yaml b/deploy/addons/coredns/coreDNS-configmap.yaml index 3ea82599dbd6..5f4edecf3840 100644 --- a/deploy/addons/coredns/coreDNS-configmap.yaml +++ b/deploy/addons/coredns/coreDNS-configmap.yaml @@ -11,11 +11,12 @@ data: errors log health - kubernetes cluster.local 10.0.0.0/24 { + kubernetes cluster.local in-addr.arpa ip6.arpa { pods insecure - upstream /etc/resolv.conf + upstream + fallthrough in-addr.arpa ip6.arpa } - prometheus + prometheus :9153 proxy . 
/etc/resolv.conf cache 30 } diff --git a/deploy/addons/coredns/coreDNS-controller.yaml b/deploy/addons/coredns/coreDNS-controller.yaml index acb4a0d043a7..b7d77d637987 100644 --- a/deploy/addons/coredns/coreDNS-controller.yaml +++ b/deploy/addons/coredns/coreDNS-controller.yaml @@ -9,6 +9,10 @@ metadata: addonmanager.kubernetes.io/mode: Reconcile spec: replicas: 1 + strategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 selector: matchLabels: k8s-app: kube-dns @@ -25,7 +29,7 @@ spec: effect: NoSchedule containers: - name: coredns - image: registry.hub.docker.com/coredns/coredns:1.0.2 + image: registry.hub.docker.com/coredns/coredns:1.0.6 imagePullPolicy: IfNotPresent resources: limits: @@ -44,9 +48,6 @@ spec: - containerPort: 53 name: dns-tcp protocol: TCP - - containerPort: 9153 - name: metrics - protocol: TCP livenessProbe: httpGet: path: /health diff --git a/deploy/addons/coredns/coreDNS-svc.yaml b/deploy/addons/coredns/coreDNS-svc.yaml index baee6b3ebe56..c53916af3da1 100644 --- a/deploy/addons/coredns/coreDNS-svc.yaml +++ b/deploy/addons/coredns/coreDNS-svc.yaml @@ -18,6 +18,3 @@ spec: - name: dns-tcp port: 53 protocol: TCP - - name: metrics - port: 9153 - protocol: TCP diff --git a/deploy/addons/efk/elasticsearch-rc.yaml b/deploy/addons/efk/elasticsearch-rc.yaml index 3288dd0ffcf6..e282ffb2ada7 100644 --- a/deploy/addons/efk/elasticsearch-rc.yaml +++ b/deploy/addons/efk/elasticsearch-rc.yaml @@ -34,12 +34,14 @@ spec: spec: containers: - name: elasticsearch-logging - image: gcr.io/google-containers/elasticsearch:v5.6.2 + image: k8s.gcr.io/elasticsearch:v5.6.2 resources: limits: cpu: 500m + memory: 2400Mi requests: cpu: 100m + memory: 2350Mi ports: - containerPort: 9200 name: db diff --git a/deploy/addons/efk/fluentd-es-rc.yaml b/deploy/addons/efk/fluentd-es-rc.yaml index 67e871375db3..0c5e3581ebf5 100644 --- a/deploy/addons/efk/fluentd-es-rc.yaml +++ b/deploy/addons/efk/fluentd-es-rc.yaml @@ -31,7 +31,7 @@ spec: spec: containers: - name: fluentd-es - image: gcr.io/google-containers/fluentd-elasticsearch:v2.0.2 + image: k8s.gcr.io/fluentd-elasticsearch:v2.0.2 env: - name: FLUENTD_ARGS value: --no-supervisor -q diff --git a/deploy/addons/ingress/ingress-configmap.yaml b/deploy/addons/ingress/ingress-configmap.yaml index 2e9a53626586..da60d2172b84 100644 --- a/deploy/addons/ingress/ingress-configmap.yaml +++ b/deploy/addons/ingress/ingress-configmap.yaml @@ -14,13 +14,15 @@ apiVersion: v1 data: + # see https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/configmap.md for all possible options and their description map-hash-bucket-size: "128" + hsts: "false" kind: ConfigMap metadata: name: nginx-load-balancer-conf namespace: kube-system labels: - addonmanager.kubernetes.io/mode: Reconcile + addonmanager.kubernetes.io/mode: EnsureExists --- apiVersion: v1 kind: ConfigMap diff --git a/deploy/addons/ingress/ingress-rc.yaml b/deploy/addons/ingress/ingress-rc.yaml index d120267338c9..3a571edf212b 100644 --- a/deploy/addons/ingress/ingress-rc.yaml +++ b/deploy/addons/ingress/ingress-rc.yaml @@ -77,7 +77,7 @@ spec: spec: terminationGracePeriodSeconds: 60 containers: - - image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.9.0 + - image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.12.0 name: nginx-ingress-controller imagePullPolicy: IfNotPresent readinessProbe: diff --git a/deploy/addons/metrics-server/metrics-apiservice.yaml b/deploy/addons/metrics-server/metrics-apiservice.yaml new file mode 100644 index 
000000000000..a644200fa526 --- /dev/null +++ b/deploy/addons/metrics-server/metrics-apiservice.yaml @@ -0,0 +1,16 @@ +apiVersion: apiregistration.k8s.io/v1beta1 +kind: APIService +metadata: + name: v1beta1.metrics.k8s.io + labels: + kubernetes.io/minikube-addons: metrics-server + addonmanager.kubernetes.io/mode: Reconcile +spec: + service: + name: metrics-server + namespace: kube-system + group: metrics.k8s.io + version: v1beta1 + insecureSkipTLSVerify: true + groupPriorityMinimum: 100 + versionPriority: 100 diff --git a/deploy/addons/metrics-server/metrics-server-deployment.yaml b/deploy/addons/metrics-server/metrics-server-deployment.yaml new file mode 100644 index 000000000000..c2a86b5ac79b --- /dev/null +++ b/deploy/addons/metrics-server/metrics-server-deployment.yaml @@ -0,0 +1,26 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: metrics-server + namespace: kube-system + labels: + k8s-app: metrics-server + kubernetes.io/minikube-addons: metrics-server + addonmanager.kubernetes.io/mode: Reconcile +spec: + selector: + matchLabels: + k8s-app: metrics-server + template: + metadata: + name: metrics-server + labels: + k8s-app: metrics-server + spec: + containers: + - name: metrics-server + image: k8s.gcr.io/metrics-server-amd64:v0.2.1 + imagePullPolicy: Always + command: + - /metrics-server + - --source=kubernetes.summary_api:'' diff --git a/deploy/addons/metrics-server/metrics-server-service.yaml b/deploy/addons/metrics-server/metrics-server-service.yaml new file mode 100644 index 000000000000..e3766c9e8de5 --- /dev/null +++ b/deploy/addons/metrics-server/metrics-server-service.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Service +metadata: + name: metrics-server + namespace: kube-system + labels: + kubernetes.io/name: "Metrics-server" + kubernetes.io/minikube-addons: metrics-server + kubernetes.io/minikube-addons-endpoint: metrics-server + addonmanager.kubernetes.io/mode: Reconcile +spec: + selector: + k8s-app: metrics-server + ports: + - port: 443 + protocol: TCP + targetPort: 443 diff --git a/deploy/addons/registry-creds/registry-creds-rc.yaml b/deploy/addons/registry-creds/registry-creds-rc.yaml index b75ad5672a5d..bd26bb52beb9 100644 --- a/deploy/addons/registry-creds/registry-creds-rc.yaml +++ b/deploy/addons/registry-creds/registry-creds-rc.yaml @@ -4,14 +4,14 @@ metadata: name: registry-creds namespace: kube-system labels: - version: v1.8 + version: v1.9 addonmanager.kubernetes.io/mode: Reconcile kubernetes.io/minikube-addons: registry-creds spec: replicas: 1 selector: name: registry-creds - version: v1.8 + version: v1.9 addonmanager.kubernetes.io/mode: Reconcile template: metadata: @@ -21,7 +21,7 @@ spec: addonmanager.kubernetes.io/mode: Reconcile spec: containers: - - image: registry.hub.docker.com/upmcenterprises/registry-creds:1.8 + - image: registry.hub.docker.com/upmcenterprises/registry-creds:1.9 name: registry-creds imagePullPolicy: Always env: diff --git a/deploy/addons/storage-provisioner/storage-provisioner.yaml b/deploy/addons/storage-provisioner/storage-provisioner.yaml index 5563b85a681d..72ee7feefb34 100644 --- a/deploy/addons/storage-provisioner/storage-provisioner.yaml +++ b/deploy/addons/storage-provisioner/storage-provisioner.yaml @@ -12,6 +12,32 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: storage-provisioner + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: Reconcile + +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: storage-provisioner + labels: + addonmanager.kubernetes.io/mode: EnsureExists +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:persistent-volume-provisioner +subjects: + - kind: ServiceAccount + name: storage-provisioner + namespace: kube-system + +--- apiVersion: v1 kind: Pod metadata: @@ -19,8 +45,9 @@ metadata: namespace: kube-system labels: integration-test: storage-provisioner - addonmanager.kubernetes.io/mode: EnsureExists + addonmanager.kubernetes.io/mode: Reconcile spec: + serviceAccountName: storage-provisioner hostNetwork: true containers: - name: storage-provisioner diff --git a/deploy/docker/README.md b/deploy/docker/README.md index 358e11b5e972..a2f249d2d113 100644 --- a/deploy/docker/README.md +++ b/deploy/docker/README.md @@ -72,7 +72,7 @@ spec: hostNetwork: true containers: - name: kube-addon-manager - image: gcr.io/google-containers/kube-addon-manager:v6.5 + image: k8s.gcr.io/kube-addon-manager:v6.5 imagePullPolicy: IfNotPresent resources: requests: diff --git a/deploy/iso/minikube-iso/board/coreos/minikube/linux-4.9_defconfig b/deploy/iso/minikube-iso/board/coreos/minikube/linux-4.9_defconfig index 8620fcda47f9..248ef29f2106 100644 --- a/deploy/iso/minikube-iso/board/coreos/minikube/linux-4.9_defconfig +++ b/deploy/iso/minikube-iso/board/coreos/minikube/linux-4.9_defconfig @@ -166,6 +166,7 @@ CONFIG_NETFILTER_XT_MATCH_HELPER=m CONFIG_NETFILTER_XT_MATCH_HL=m CONFIG_NETFILTER_XT_MATCH_IPCOMP=m CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m CONFIG_NETFILTER_XT_MATCH_L2TP=m CONFIG_NETFILTER_XT_MATCH_LENGTH=m CONFIG_NETFILTER_XT_MATCH_LIMIT=m @@ -482,4 +483,21 @@ CONFIG_SECURITY_SELINUX=y CONFIG_SECURITY_SELINUX_BOOTPARAM=y CONFIG_SECURITY_SELINUX_DISABLE=y CONFIG_CRYPTO_USER_API_HASH=y +CONFIG_HAVE_KVM=y +CONFIG_HAVE_KVM_IRQCHIP=y +CONFIG_HAVE_KVM_IRQFD=y +CONFIG_HAVE_KVM_IRQ_ROUTING=y +CONFIG_HAVE_KVM_EVENTFD=y +CONFIG_KVM_MMIO=y +CONFIG_KVM_ASYNC_PF=y +CONFIG_HAVE_KVM_MSI=y +CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y +CONFIG_KVM_VFIO=y +CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y +CONFIG_KVM_COMPAT=y +CONFIG_HAVE_KVM_IRQ_BYPASS=y +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=m +CONFIG_KVM_INTEL=m +CONFIG_KVM_AMD=m CONFIG_VHOST_VSOCK=m diff --git a/deploy/iso/minikube-iso/configs/minikube_defconfig b/deploy/iso/minikube-iso/configs/minikube_defconfig index 1d0fcda9f610..f98f32a8484e 100644 --- a/deploy/iso/minikube-iso/configs/minikube_defconfig +++ b/deploy/iso/minikube-iso/configs/minikube_defconfig @@ -60,3 +60,4 @@ BR2_TARGET_ROOTFS_ISO9660_BOOT_MENU="$(BR2_EXTERNAL_MINIKUBE_PATH)/board/coreos/ BR2_TARGET_SYSLINUX=y BR2_PACKAGE_HOST_E2TOOLS=y BR2_PACKAGE_SYSDIG=y +BR2_PACKAGE_SSHFS=y diff --git a/deploy/iso/minikube-iso/package/Config.in b/deploy/iso/minikube-iso/package/Config.in index fbec9a82f713..1ad69cbf0c12 100644 --- a/deploy/iso/minikube-iso/package/Config.in +++ b/deploy/iso/minikube-iso/package/Config.in @@ -3,6 +3,7 @@ menu "System tools" source "$BR2_EXTERNAL_MINIKUBE_PATH/package/runc-master/Config.in" source "$BR2_EXTERNAL_MINIKUBE_PATH/package/kpod/Config.in" source "$BR2_EXTERNAL_MINIKUBE_PATH/package/crio-bin/Config.in" + source "$BR2_EXTERNAL_MINIKUBE_PATH/package/cri-tools/Config.in" source "$BR2_EXTERNAL_MINIKUBE_PATH/package/automount/Config.in" source 
"$BR2_EXTERNAL_MINIKUBE_PATH/package/docker-bin/Config.in" source "$BR2_EXTERNAL_MINIKUBE_PATH/package/cni-bin/Config.in" diff --git a/deploy/iso/minikube-iso/package/cri-tools/Config.in b/deploy/iso/minikube-iso/package/cri-tools/Config.in new file mode 100644 index 000000000000..b82d70116434 --- /dev/null +++ b/deploy/iso/minikube-iso/package/cri-tools/Config.in @@ -0,0 +1,5 @@ +config BR2_PACKAGE_CRI_TOOLS + bool "cri-tools" + default y + depends on BR2_PACKAGE_HOST_GO_ARCH_SUPPORTS + depends on BR2_PACKAGE_HOST_GO_CGO_LINKING_SUPPORTS diff --git a/deploy/iso/minikube-iso/package/cri-tools/cri-tools.hash b/deploy/iso/minikube-iso/package/cri-tools/cri-tools.hash new file mode 100644 index 000000000000..dac949a4dff8 --- /dev/null +++ b/deploy/iso/minikube-iso/package/cri-tools/cri-tools.hash @@ -0,0 +1,3 @@ +sha256 e83b5a8ff6fc2c98c3463600e74a86d7dbbea6330f883cd5663cbd59641a8086 v0.2.tar.gz +sha256 2c8e45414d5804628f557171d622ca3e4a55dfc1f2de0cd33bcba98e863342d1 v1.0.0-alpha.0.tar.gz +sha256 688cd566e11f09247e8135ac1ee65e5afb3ea3303c00569c5e1e265e3b2c68f6 v1.0.0-beta.0.tar.gz diff --git a/deploy/iso/minikube-iso/package/cri-tools/cri-tools.mk b/deploy/iso/minikube-iso/package/cri-tools/cri-tools.mk new file mode 100644 index 000000000000..825bc9c5b4e3 --- /dev/null +++ b/deploy/iso/minikube-iso/package/cri-tools/cri-tools.mk @@ -0,0 +1,35 @@ +################################################################################ +# +# cri-tools +# +################################################################################ + +CRI_TOOLS_VERSION = v1.0.0-beta.0 +CRI_TOOLS_SITE = https://github.com/kubernetes-incubator/cri-tools/archive +CRI_TOOLS_SOURCE = $(CRI_TOOLS_VERSION).tar.gz +CRI_TOOLS_LICENSE = Apache-2.0 +CRI_TOOLS_LICENSE_FILES = LICENSE +CRI_TOOLS_DEPENDENCIES = +CRI_TOOLS_GOPATH = $(@D)/_output +CRI_TOOLS_ENV = \ + CGO_ENABLED=1 \ + GOPATH="$(CRI_TOOLS_GOPATH)" \ + PATH=$(CRI_TOOLS_GOPATH)/bin:$(BR_PATH) + + +define CRI_TOOLS_CONFIGURE_CMDS + mkdir -p $(CRI_TOOLS_GOPATH)/src/github.com/kubernetes-incubator + ln -sf $(@D) $(CRI_TOOLS_GOPATH)/src/github.com/kubernetes-incubator/cri-tools +endef + +define CRI_TOOLS_BUILD_CMDS + $(CRI_TOOLS_ENV) $(MAKE) $(TARGET_CONFIGURE_OPTS) -C $(@D) crictl +endef + +define CRI_TOOLS_INSTALL_TARGET_CMDS + $(INSTALL) -Dm755 \ + $(CRI_TOOLS_GOPATH)/bin/crictl \ + $(TARGET_DIR)/usr/bin/crictl +endef + +$(eval $(generic-package)) diff --git a/deploy/iso/minikube-iso/package/crio-bin/crio-bin.hash b/deploy/iso/minikube-iso/package/crio-bin/crio-bin.hash index f0fee400e4f9..9781d351b961 100644 --- a/deploy/iso/minikube-iso/package/crio-bin/crio-bin.hash +++ b/deploy/iso/minikube-iso/package/crio-bin/crio-bin.hash @@ -1 +1,2 @@ sha256 d310d52706262009af886dbd3e8dcd09a339cdc3b57dc22a9121e6d6a87d2921 v1.8.4.tar.gz +sha256 9f79cee99e272c9cfc561ae31235d84d4da59fd5c8b3d3ab6623bf9a92d90c5a v1.10.0.tar.gz diff --git a/deploy/iso/minikube-iso/package/crio-bin/crio-bin.mk b/deploy/iso/minikube-iso/package/crio-bin/crio-bin.mk index 9fc2a6b35cfc..c8aaefcfd433 100644 --- a/deploy/iso/minikube-iso/package/crio-bin/crio-bin.mk +++ b/deploy/iso/minikube-iso/package/crio-bin/crio-bin.mk @@ -4,7 +4,7 @@ # ################################################################################ -CRIO_BIN_VERSION = v1.8.4 +CRIO_BIN_VERSION = v1.10.0 CRIO_BIN_SITE = https://github.com/kubernetes-incubator/cri-o/archive CRIO_BIN_SOURCE = $(CRIO_BIN_VERSION).tar.gz CRIO_BIN_DEPENDENCIES = libgpgme diff --git a/deploy/iso/minikube-iso/package/docker-bin/docker-bin.hash 
b/deploy/iso/minikube-iso/package/docker-bin/docker-bin.hash index 75d5eb7de142..c9f6e211e6c2 100644 --- a/deploy/iso/minikube-iso/package/docker-bin/docker-bin.hash +++ b/deploy/iso/minikube-iso/package/docker-bin/docker-bin.hash @@ -4,3 +4,4 @@ sha256 e582486c9db0f4229deba9f8517145f8af6c5fae7a1243e6b07876bd3e706620 docker- sha256 a9e90a73c3cdfbf238f148e1ec0eaff5eb181f92f35bdd938fd7dab18e1c4647 docker-17.09.0-ce.tgz sha256 77d3eaa72f2b63c94ea827b548f4a8b572b754a431c59258e3f2730411f64be7 docker-17.09.1-ce.tgz sha256 692e1c72937f6214b1038def84463018d8e320c8eaf8530546c84c2f8f9c767d docker-17.12.0-ce.tgz +sha256 1270dce1bd7e1838d62ae21d2505d87f16efc1d9074645571daaefdfd0c14054 docker-17.12.1-ce.tgz diff --git a/deploy/iso/minikube-iso/package/docker-bin/docker-bin.mk b/deploy/iso/minikube-iso/package/docker-bin/docker-bin.mk index c25988f972a5..a443faecff15 100644 --- a/deploy/iso/minikube-iso/package/docker-bin/docker-bin.mk +++ b/deploy/iso/minikube-iso/package/docker-bin/docker-bin.mk @@ -4,7 +4,7 @@ # ################################################################################ -DOCKER_BIN_VERSION = 17.09.0-ce +DOCKER_BIN_VERSION = 17.12.1-ce DOCKER_BIN_SITE = https://download.docker.com/linux/static/stable/x86_64 DOCKER_BIN_SOURCE = docker-$(DOCKER_BIN_VERSION).tgz diff --git a/deploy/minikube/k8s_releases.json b/deploy/minikube/k8s_releases.json index 844f814b48f9..4dafce12847d 100644 --- a/deploy/minikube/k8s_releases.json +++ b/deploy/minikube/k8s_releases.json @@ -1,4 +1,13 @@ [ + { + "version": "v1.10.0" + }, + { + "version": "v1.9.4" + }, + { + "version": "v1.9.0" + }, { "version": "v1.8.0" }, @@ -92,4 +101,4 @@ { "version": "v1.3.0" } -] +] \ No newline at end of file diff --git a/deploy/minikube/release_sanity_test.go b/deploy/minikube/release_sanity_test.go index 5858a54e6833..128432834377 100644 --- a/deploy/minikube/release_sanity_test.go +++ b/deploy/minikube/release_sanity_test.go @@ -32,19 +32,6 @@ import ( "k8s.io/minikube/pkg/util" ) -const ( - downloadURL = "https://storage.googleapis.com/minikube/releases/%s/minikube-%s-amd64%s" -) - -func getDownloadURL(version, platform string) string { - switch platform { - case "windows": - return fmt.Sprintf(downloadURL, version, platform, ".exe") - default: - return fmt.Sprintf(downloadURL, version, platform, "") - } -} - func getShaFromURL(url string) (string, error) { fmt.Println("Downloading: ", url) r, err := http.Get(url) @@ -71,7 +58,7 @@ func TestReleasesJson(t *testing.T) { fmt.Printf("Checking release: %s\n", r.Name) for platform, sha := range r.Checksums { fmt.Printf("Checking SHA for %s.\n", platform) - actualSha, err := getShaFromURL(getDownloadURL(r.Name, platform)) + actualSha, err := getShaFromURL(util.GetBinaryDownloadURL(r.Name, platform)) if err != nil { t.Errorf("Error calcuating SHA for %s-%s. 
Error: %s", r.Name, platform, err) continue diff --git a/deploy/minikube/releases.json b/deploy/minikube/releases.json index aa0a44f2060d..bd27f80e95e3 100644 --- a/deploy/minikube/releases.json +++ b/deploy/minikube/releases.json @@ -1,4 +1,12 @@ [ + { + "name": "v0.25.0", + "checksums": { + "darwin": "0d85ecc1cd064bae04ea4a8ef634d4fdc80318b65a2b8d14d105f1e15fcbced0", + "linux": "5676f2d4f37f1c6f9cf99d56c7e1a3016976d657f9715e8cb255667a0c5803c1", + "windows": "de2cbda425ae76ec7240d4543d8642598974fa558727bee10b7866ceb72c3fd9" + } + }, { "name": "v0.24.1", "checksums": { diff --git a/docs/addons.md b/docs/addons.md index 783acd0bdb4c..cc181e817457 100644 --- a/docs/addons.md +++ b/docs/addons.md @@ -31,7 +31,7 @@ The currently supported addons include: * [Kube-dns](https://github.com/kubernetes/kubernetes/tree/master/cluster/addons/dns) * [Heapster](https://github.com/kubernetes/heapster): [Troubleshooting Guide](https://github.com/kubernetes/heapster/blob/master/docs/influxdb.md) Note:You will need to login to Grafana as admin/admin in order to access the console * [EFK](https://github.com/kubernetes/kubernetes/tree/master/cluster/addons/fluentd-elasticsearch) -* [Registry](https://github.com/kubernetes/kubernetes/tree/master/cluster/addons/registry) +* [Registry](https://github.com/kubernetes/minikube/tree/master/deploy/addons/registry) * [Registry Credentials](https://github.com/upmc-enterprises/registry-creds) * [CoreDNS](https://github.com/coredns/deployment/tree/master/kubernetes) * [Ingress](https://github.com/kubernetes/ingress-nginx) @@ -40,3 +40,5 @@ The currently supported addons include: If you would like to have minikube properly start/restart custom addons, place the addon(s) you wish to be launched with minikube in the `.minikube/addons` directory. Addons in this folder will be moved to the minikube VM and launched each time minikube is started/restarted. If you have a request for an addon in minikube, please open an issue with the name and preferably a link to the addon with a description of its purpose and why it should be added. You can also attempt to add the addon to minikube by following the guide at [Adding an Addon](contributors/adding_an_addon.md) + +**Note:** If you want to have a look at the default configuration for the addons, see [deploy/addons](https://github.com/kubernetes/minikube/tree/master/deploy/addons). diff --git a/docs/alternative_runtimes.md b/docs/alternative_runtimes.md index 19da5ce4bdaf..73443624ac2d 100644 --- a/docs/alternative_runtimes.md +++ b/docs/alternative_runtimes.md @@ -13,7 +13,7 @@ $ minikube start \ To use [CRI-O](https://github.com/kubernetes-incubator/cri-o) as the container runtime, run: -```bash +```shell $ minikube start \ --network-plugin=cni \ --container-runtime=cri-o \ @@ -22,7 +22,7 @@ $ minikube start \ Or you can use the extended version: -```bash +```shell $ minikube start \ --network-plugin=cni \ --extra-config=kubelet.container-runtime=remote \ diff --git a/docs/configuring_kubernetes.md b/docs/configuring_kubernetes.md index 223fc4791b91..ef5e172634b9 100644 --- a/docs/configuring_kubernetes.md +++ b/docs/configuring_kubernetes.md @@ -16,7 +16,7 @@ The kubeadm bootstrapper can be configured by the `--extra-config` flag on the ` and `key=value` is a flag=value pair for the component being configured. 
For example, -``` +```shell minikube start --extra-config=apiserver.v=10 --extra-config=kubelet.max-pods=100 ``` diff --git a/docs/contributors/build_guide.md b/docs/contributors/build_guide.md index 938470fa8412..de50fcbe56e8 100644 --- a/docs/contributors/build_guide.md +++ b/docs/contributors/build_guide.md @@ -15,7 +15,7 @@ $ sudo dnf install -y glibc-static ### Building from Source Clone minikube into your go path under `$GOPATH/src/k8s.io` -``` +```shell $ git clone https://github.com/kubernetes/minikube.git $GOPATH/src/k8s.io/minikube $ cd $GOPATH/src/k8s.io/minikube $ make diff --git a/docs/contributors/minikube_iso.md b/docs/contributors/minikube_iso.md index 08d56be7b346..1dd5c967c2f0 100644 --- a/docs/contributors/minikube_iso.md +++ b/docs/contributors/minikube_iso.md @@ -1,6 +1,6 @@ ## minikube ISO image -This includes the configuration for an alternative bootable ISO image meant to be used in conjection with minikube. +This includes the configuration for an alternative bootable ISO image meant to be used in conjunction with minikube. It includes: - systemd as the init system @@ -13,7 +13,7 @@ It includes: ### Requirements * Linux -``` +```shell sudo apt-get install build-essential gnupg2 p7zip-full git wget cpio python \ unzip bc gcc-multilib automake libtool locales ``` @@ -23,20 +23,20 @@ Also be sure to have an UTF-8 locale set up in order to build the ISO. ### Build instructions -``` +```shell $ git clone https://github.com/kubernetes/minikube $ cd minikube $ make buildroot-image $ make out/minikube.iso ``` -The build will occurs inside a docker container, if you want to do this +The build will occur inside a docker container. If you want to do this on baremetal, replace `make out/minikube.iso` with `IN_DOCKER=1 make out/minikube.iso`. The bootable ISO image will be available in `out/minikube.iso`. ### Testing local minikube-iso changes -``` +```shell $ ./out/minikube start \ --container-runtime=rkt \ --network-plugin=cni \ @@ -47,7 +47,7 @@ $ ./out/minikube start \ To change the buildroot configuration, execute: -``` +```shell $ cd out/buildroot $ make menuconfig $ make @@ -55,14 +55,14 @@ $ make To save any buildroot configuration changes made with `make menuconfig`, execute: -``` +```shell $ cd out/buildroot $ make savedefconfig ``` The changes will be reflected in the `minikube-iso/configs/minikube_defconfig` file. -``` +```shell $ git status ## master M deploy/iso/minikube-iso/configs/minikube_defconfig @@ -73,7 +73,7 @@ $ git status To make any kernel configuration changes and save them, execute: -``` +```shell $ make linux-menuconfig ``` diff --git a/docs/contributors/releasing_minikube.md b/docs/contributors/releasing_minikube.md index 650e79967555..df24d7513568 100644 --- a/docs/contributors/releasing_minikube.md +++ b/docs/contributors/releasing_minikube.md @@ -9,11 +9,11 @@ See [this PR](https://github.com/kubernetes/minikube/pull/164) for an example. This step isn't always required. Check if there were changes in the deploy directory. If you do this, bump the ISO URL to point to the new ISO, and send a PR. -To do this, build the new iso by running: +To do this, build the new ISO by running: ```shell deploy/iso/build.sh ``` -This will generate a new iso at 'deploy/iso/minikube.iso'. Then upload the iso and shasum using the following command: +This will generate a new ISO at 'deploy/iso/minikube.iso'. 
Then upload the ISO and shasum using the following command: ```shell gsutil cp deploy/iso/minikube.iso gs://minikube/minikube-.iso gsutil cp deploy/iso/minikube.iso.sha256 gs://minikube/minikube-.iso.sha256 diff --git a/docs/drivers.md b/docs/drivers.md index 3d381e7b17e4..d059af8c4fa4 100644 --- a/docs/drivers.md +++ b/docs/drivers.md @@ -22,7 +22,7 @@ The KVM2 driver is maintained by the minikube team, and is built, tested and rel To install the KVM2 driver, first install and configure the prereqs: -``` +```shell # Install libvirt and qemu-kvm on your system, e.g. # Debian/Ubuntu (for Debian Stretch libvirt-bin it's been replaced with libvirt-clients and libvirt-daemon-system) $ sudo apt install libvirt-bin qemu-kvm @@ -44,13 +44,13 @@ $ newgrp libvirt Then install the driver itself: -``` +```shell curl -LO https://storage.googleapis.com/minikube/releases/latest/docker-machine-driver-kvm2 && chmod +x docker-machine-driver-kvm2 && sudo mv docker-machine-driver-kvm2 /usr/bin/ ``` To use the driver you would do: -``` +```shell minikube start --vm-driver kvm2 ``` @@ -59,7 +59,7 @@ minikube start --vm-driver kvm2 Minikube is currently tested against [`docker-machine-driver-kvm` v0.10.0](https://github.com/dhiltgen/docker-machine-kvm/releases). After following the instructions on the KVM driver releases page, you need to make sure that have the necessary packages and permissions by following these instructions: -``` +```shell # Install libvirt and qemu-kvm on your system, e.g. # Debian/Ubuntu (for Debian Stretch libvirt-bin it's been replaced with libvirt-clients and libvirt-daemon-system) @@ -82,7 +82,7 @@ $ newgrp libvirt To use the driver you would do: -``` +```shell minikube start --vm-driver kvm ``` @@ -93,7 +93,7 @@ It is built from the minikube source tree, and uses [moby/hyperkit](http://githu To install the hyperkit driver: -``` +```shell curl -LO https://storage.googleapis.com/minikube/releases/latest/docker-machine-driver-hyperkit \ && chmod +x docker-machine-driver-hyperkit \ && sudo mv docker-machine-driver-hyperkit /usr/local/bin/ \ @@ -109,7 +109,7 @@ If you encountered errors like `Could not find hyperkit executable`, you might n From https://github.com/zchee/docker-machine-driver-xhyve#install: -``` +```shell $ brew install docker-machine-driver-xhyve # docker-machine-driver-xhyve need root owner and uid diff --git a/docs/host_folder_mount.md b/docs/host_folder_mount.md index c3ad1c3d9efa..d19de3e827c3 100644 --- a/docs/host_folder_mount.md +++ b/docs/host_folder_mount.md @@ -2,7 +2,7 @@ ## Mounting Host Folders `minikube mount /path/to/dir/to/mount:/vm-mount-path` is the recommended way to mount directories into minikube so that they can be used in your local Kubernetes cluster. The command works on all supported platforms. Below is an example workflow for using `minikube mount`: -``` +```shell # terminal 1 $ mkdir ~/mount-dir $ minikube mount ~/mount-dir:/mount-9p @@ -12,7 +12,7 @@ ufs starting # This process has to stay open, so in another terminal... 
``` -``` +```shell # terminal 2 $ echo "hello from host" > ~/mount-dir/hello-from-host $ kubectl run -i --rm --tty ubuntu --overrides=' @@ -50,7 +50,7 @@ $ kubectl run -i --rm --tty ubuntu --overrides=' ] } } -' --image=ubuntu:14.04 --restart=Never -- bash +' --image=ubuntu:14.04 --restart=Never -- bash Waiting for pod default/ubuntu to be running, status is Pending, pod ready: false Waiting for pod default/ubuntu to be running, status is Running, pod ready: false diff --git a/docs/http_proxy.md b/docs/http_proxy.md index d0c312224adc..24cb82c148e4 100644 --- a/docs/http_proxy.md +++ b/docs/http_proxy.md @@ -9,8 +9,8 @@ To do this, pass the required environment variables as flags during `minikube st For example: ```shell -$ minikube start --docker-env HTTP_PROXY=http://$YOURPROXY:PORT \ - --docker-env HTTPS_PROXY=https://$YOURPROXY:PORT +$ minikube start --docker-env=HTTP_PROXY=http://$YOURPROXY:PORT \ + --docker-env=HTTPS_PROXY=https://$YOURPROXY:PORT ``` If your Virtual Machine address is 192.168.99.100, then chances are your proxy settings will prevent kubectl from directly reaching it. diff --git a/docs/networking.md b/docs/networking.md index 21a3bb1cfb42..c6f0f9c805cb 100644 --- a/docs/networking.md +++ b/docs/networking.md @@ -3,9 +3,9 @@ The minikube VM is exposed to the host system via a host-only IP address, that can be obtained with the `minikube ip` command. Any services of type `NodePort` can be accessed over that IP address, on the NodePort. -To determine the NodePort for your service, you can use a `kubectl` command like this: +To determine the NodePort for your service, you can use a `kubectl` command like this (note that `nodePort` begins with lowercase `n` in JSON output): -`kubectl get service $SERVICE --output='jsonpath="{.spec.ports[0].NodePort}"'` +`kubectl get service $SERVICE --output='jsonpath="{.spec.ports[0].nodePort}"'` We also have a shortcut for fetching the minikube IP and a service's `NodePort`: diff --git a/docs/persistent_volumes.md b/docs/persistent_volumes.md index 4a88a407e860..9948ef2973c2 100644 --- a/docs/persistent_volumes.md +++ b/docs/persistent_volumes.md @@ -1,9 +1,9 @@ ## Persistent Volumes Minikube supports [PersistentVolumes](http://kubernetes.io/docs/user-guide/persistent-volumes/) of type `hostPath`. -These PersistentVolumes are mapped to a directory inside the minikube VM. +These PersistentVolumes are mapped to a directory inside the Minikube VM. The Minikube VM boots into a tmpfs, so most directories will not be persisted across reboots (`minikube stop`). -However, Minikube is configured to persist files stored under the following directories in the minikube VM: +However, Minikube is configured to persist files stored under the following directories in the Minikube VM: * `/data` * `/var/lib/localkube` diff --git a/docs/reusing_the_docker_daemon.md b/docs/reusing_the_docker_daemon.md index 9213442cc061..1d39b02cf0f1 100644 --- a/docs/reusing_the_docker_daemon.md +++ b/docs/reusing_the_docker_daemon.md @@ -1,26 +1,26 @@ ### Reusing the Docker daemon -When using a single VM of kubernetes it's really handy to reuse the Docker daemon inside the VM; as this means you don't have to build on your host machine and push the image into a docker registry - you can just build inside the same docker daemon as minikube which speeds up local experiments. 
+When using a single VM of Kubernetes it's really handy to reuse the Docker daemon inside the VM; as this means you don't have to build on your host machine and push the image into a docker registry - you can just build inside the same docker daemon as minikube which speeds up local experiments. To be able to work with the docker daemon on your mac/linux host use the docker-env command in your shell: -``` +```shell eval $(minikube docker-env) ``` -you should now be able to use docker on the command line on your host mac/linux machine talking to the docker daemon inside the minikube VM: -``` +You should now be able to use docker on the command line on your host mac/linux machine talking to the docker daemon inside the minikube VM: +```shell docker ps ``` On Centos 7, docker may report the following error: -``` +```shell Could not read CA certificate "/etc/docker/ca.pem": open /etc/docker/ca.pem: no such file or directory ``` The fix is to update /etc/sysconfig/docker to ensure that minikube's environment changes are respected: -``` +```diff < DOCKER_CERT_PATH=/etc/docker --- > if [ -z "${DOCKER_CERT_PATH}" ]; then diff --git a/hack/boilerplate/boilerplate.py b/hack/boilerplate/boilerplate.py index b8d426eb5316..db11b93da00a 100755 --- a/hack/boilerplate/boilerplate.py +++ b/hack/boilerplate/boilerplate.py @@ -89,7 +89,7 @@ def file_passes(filename, refs, regexs): if p.search(d): return False - # Replace all occurrences of the regex "2017|2016|2015|2014" with "YEAR" + # Replace all occurrences of the regex "2018|2017|2016|2015|2014" with "YEAR" p = regexs["date"] for i, d in enumerate(data): (data[i], found) = p.subn('YEAR', d) @@ -149,8 +149,8 @@ def get_regexs(): regexs = {} # Search for "YEAR" which exists in the boilerplate, but shouldn't in the real thing regexs["year"] = re.compile( 'YEAR' ) - # dates can be 2014, 2015, 2016 or 2017, company holder names can be anything - regexs["date"] = re.compile( '(2014|2015|2016|2017)' ) + # dates can be 2014, 2015, 2016, 2017, or 2018, company holder names can be anything + regexs["date"] = re.compile( '(2014|2015|2016|2017|2018)' ) # strip // +build \n\n build constraints regexs["go_build_constraints"] = re.compile(r"^(// \+build.*\n)+\n", re.MULTILINE) # strip #!.* from shell scripts diff --git a/hack/get_k8s_version.py b/hack/get_k8s_version.py index 7e9690cba88f..91733ea36bb4 100644 --- a/hack/get_k8s_version.py +++ b/hack/get_k8s_version.py @@ -17,6 +17,7 @@ "This package gets the LD flags used to set the version of kubernetes." import json +import re import subprocess import sys from datetime import datetime @@ -24,12 +25,28 @@ K8S_PACKAGE = 'k8s.io/kubernetes/' X_ARGS = ['-X k8s.io/minikube/vendor/k8s.io/kubernetes/pkg/version.', '-X k8s.io/minikube/vendor/k8s.io/client-go/pkg/version.'] -def get_rev(): +def get_commit(): return 'gitCommit=%s' % get_from_godep('Rev') def get_version(): return 'gitVersion=%s' % get_from_godep('Comment') +def get_major_and_minor(): + major = '' + minor = '' + version = get_from_godep('Comment') + # [kubernetes/hack/lib/version.sh]: + # Try to match the "git describe" output to a regex to try to extract + # the "major" and "minor" versions and whether this is the exact tagged + # version or whether the tree is between two tagged versions. 
+ m = re.match('^v([0-9]+)\.([0-9]+)(\.[0-9]+)?([-].*)?$', version) + if m: + major = m.group(1) + minor = m.group(2) + if m.group(4): + minor += "+" + return ('gitMajor=%s' % major, 'gitMinor=%s' % minor) + def get_from_godep(key): with open('./Godeps/Godeps.json') as f: contents = json.load(f) @@ -51,7 +68,9 @@ def get_build_date(): def main(): if len(sys.argv) > 1 and sys.argv[1] == "--k8s-version-only": return get_from_godep('Comment') - args = [get_rev(), get_version(), get_tree_state(), get_build_date()] + major, minor = get_major_and_minor() + args = [get_commit(), get_tree_state(), get_version(), + major, minor, get_build_date()] ret = '' for xarg in X_ARGS: for arg in args: diff --git a/hack/jenkins/common.sh b/hack/jenkins/common.sh index c727b6b2ff2f..3374b009377f 100755 --- a/hack/jenkins/common.sh +++ b/hack/jenkins/common.sh @@ -43,8 +43,8 @@ chmod +x out/docker-machine-driver-* chmod +x out/localkube # Fix permissions in $HOME -sudo chown -R $USER $HOME/.kube -sudo chown -R $USER $HOME/.minikube +sudo chown -R $USER $HOME/.kube || true +sudo chown -R $USER $HOME/.minikube || true export MINIKUBE_WANTREPORTERRORPROMPT=False sudo ./out/minikube-${OS_ARCH} delete || true diff --git a/hack/jenkins/minikube_cross_build_and_upload.sh b/hack/jenkins/minikube_cross_build_and_upload.sh index 368ae7e0a05f..0aae48e1e2cc 100755 --- a/hack/jenkins/minikube_cross_build_and_upload.sh +++ b/hack/jenkins/minikube_cross_build_and_upload.sh @@ -33,10 +33,11 @@ set +e make -j 16 all set -e +make_result="$?" gsutil cp gs://minikube-builds/logs/index.html gs://minikube-builds/logs/${ghprbPullId}/index.html # Exit if the cross build failed. -if [ "$?"-ne 0 ]; then echo "cross build failed"; exit 1; fi +if [ "$make_result"-ne 0 ]; then echo "cross build failed"; exit 1; fi # If there are ISO changes, build and upload the ISO # then set the default to the newly built ISO for testing diff --git a/hack/jenkins/minishift_linux_integration_tests_kvm.sh b/hack/jenkins/minishift_linux_integration_tests_kvm.sh index cd47b294130c..52bc60d1c96e 100755 --- a/hack/jenkins/minishift_linux_integration_tests_kvm.sh +++ b/hack/jenkins/minishift_linux_integration_tests_kvm.sh @@ -185,12 +185,20 @@ function verify_delete() { print_success_message "Deleting VM" } +function verify_sshfs_installation() { + expected="SSHFS version 2.5" + output=`$BINARY ssh -- sudo sshfs -V` + assert_equal "$output" "$expected" + print_success_message "SSHFS installation" +} + # Tests set +e verify_start_instance verify_vm_ip verify_cifs_installation verify_nfs_installation +verify_sshfs_installation verify_bind_mount verify_swap_space verify_delete diff --git a/hack/jenkins/release_update_installers.sh b/hack/jenkins/release_update_installers.sh index 41edb3a1b02c..88f5c8b14536 100755 --- a/hack/jenkins/release_update_installers.sh +++ b/hack/jenkins/release_update_installers.sh @@ -46,7 +46,7 @@ pushd aur-minikube >/dev/null popd >/dev/null -git clone ssh://aur@aur.archlinux.org/minikube.git aur-minikube-driver-kvm +git clone ssh://aur@aur.archlinux.org/docker-machine-driver-kvm2.git aur-minikube-driver-kvm pushd aur-minikube-driver-kvm >/dev/null sed -e "s/\$PKG_VERSION/${REPLACE_PKG_VERSION}/g" \ sed -e "s/\$MINIKUBE_DRIVER_KVM_SHA256/${REPLACE_MINIKUBE_DRIVER_KVM_SHA256}/g" \ diff --git a/installers/linux/deb/minikube_deb_template/DEBIAN/control b/installers/linux/deb/minikube_deb_template/DEBIAN/control index 16c7abd4c07c..6c1a6a6aca5a 100644 --- a/installers/linux/deb/minikube_deb_template/DEBIAN/control +++ 
b/installers/linux/deb/minikube_deb_template/DEBIAN/control @@ -3,7 +3,7 @@ Version: --VERSION-- Section: base Priority: optional Architecture: amd64 -Depends: virtualbox +Recommends: virtualbox Maintainer: Aaron Prindle Description: Minikube Minikube is a tool that makes it easy to run Kubernetes locally. diff --git a/makedepend.sh b/makedepend.sh new file mode 100755 index 000000000000..7f4e10c89bad --- /dev/null +++ b/makedepend.sh @@ -0,0 +1,42 @@ +#!/bin/sh + +# Copyright 2018 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Generate go dependencies, for make. Uses `go list". +# Usage: makedepend.sh [-t] output package path [extra] + +PATH_FORMAT='{{ .ImportPath }}{{"\n"}}{{join .Deps "\n"}}' +FILE_FORMAT='{{ range $file := .GoFiles }} {{$.Dir}}/{{$file}}{{"\n"}}{{end}}' + +if [ "$1" = "-t" ] +then + PATH_FORMAT='{{ if .TestGoFiles }} {{.ImportPath}} {{end}}' + shift +fi + +out=$1 +pkg=$2 +path=$3 +extra=$4 + +# check for mandatory parameters +test -n "$out$pkg$path" || exit 1 + +echo "$out: $extra\\" +go list -f "$PATH_FORMAT" $path | + grep "$pkg" | + xargs go list -f "$FILE_FORMAT" | + sed -e "s|^ ${GOPATH}| \$(GOPATH)|;s/$/ \\\\/" +echo " #" diff --git a/pkg/drivers/kvm/domain.go b/pkg/drivers/kvm/domain.go index 1942583a5b6c..3af8a9c3c3d8 100644 --- a/pkg/drivers/kvm/domain.go +++ b/pkg/drivers/kvm/domain.go @@ -37,6 +37,7 @@ const domainTmpl = ` + hvm diff --git a/pkg/drivers/none/none.go b/pkg/drivers/none/none.go index 8e493f71c5da..78063b104a16 100644 --- a/pkg/drivers/none/none.go +++ b/pkg/drivers/none/none.go @@ -22,6 +22,8 @@ import ( "os/exec" "strings" + "github.com/golang/glog" + "github.com/docker/machine/libmachine/drivers" "github.com/docker/machine/libmachine/state" "github.com/pkg/errors" @@ -31,8 +33,9 @@ import ( ) const driverName = "none" -const dockerkillcmd = `docker rm $(docker kill $(docker ps -a --filter="name=k8s_" --format="{{.ID}}"))` -const dockerstopcmd = `docker stop $(docker ps -a --filter="name=k8s_" --format="{{.ID}}")` +const dockerstopcmd = `docker kill $(docker ps -a --filter="name=k8s_" --format="{{.ID}}")` + +var dockerkillcmd = fmt.Sprintf(`docker rm $(%s)`, dockerstopcmd) // none Driver is a driver designed to run localkube w/o a VM type Driver struct { @@ -124,34 +127,37 @@ fi } func (d *Driver) Kill() error { - cmd := exec.Command("sudo", "systemctl", "stop", "localkube.service") - if err := cmd.Start(); err != nil { - return errors.Wrap(err, "stopping the localkube service") - } - cmd = exec.Command("sudo", "rm", "-rf", "/var/lib/localkube") - if err := cmd.Start(); err != nil { - return errors.Wrap(err, "removing localkube") + for _, cmdStr := range [][]string{ + {"systemctl", "stop", "localkube.service"}, + {"rm", "-rf", "/var/lib/localkube"}, + } { + cmd := exec.Command("sudo", cmdStr...) + if out, err := cmd.CombinedOutput(); err != nil { + glog.Warningf("Error %s running command: %s. 
Output: %s", err, cmdStr, string(out)) + } } return nil } func (d *Driver) Remove() error { - rmCmd := `for svc in "localkube", "kubelet"; do + rmCmd := `for svc in "localkube" "kubelet"; do sudo systemctl stop "$svc".service done + sudo rm -rf /data + sudo rm -rf /etc/kubernetes/manifests sudo rm -rf /var/lib/localkube || true` - if _, err := runCommand(rmCmd, true); err != nil { - return errors.Wrap(err, "stopping minikube") + for _, cmdStr := range []string{rmCmd, dockerkillcmd} { + if out, err := runCommand(cmdStr, true); err != nil { + glog.Warningf("Error %s running command: %s, Output: %s", err, cmdStr, out) + } } - runCommand(dockerkillcmd, false) - return nil } func (d *Driver) Restart() error { - restartCmd := `for svc in "localkube", "kubelet"; do + restartCmd := `for svc in "localkube" "kubelet"; do if systemctl is-active $svc.service; then sudo systemctl restart "$svc".service fi @@ -179,9 +185,9 @@ func (d *Driver) Start() error { func (d *Driver) Stop() error { var stopcmd = fmt.Sprintf("if [[ `systemctl` =~ -\\.mount ]] &>/dev/null; "+`then - for svc in "localkube", "kubelet"; do - sudo systemctl stop "$svc".service - done +for svc in "localkube" "kubelet"; do + sudo systemctl stop "$svc".service || true +done else sudo kill $(cat %s) fi @@ -199,7 +205,9 @@ fi break } } - runCommand(dockerstopcmd, false) + if out, err := runCommand(dockerstopcmd, false); err != nil { + glog.Warningf("Error %s running command %s. Output: %s", err, dockerstopcmd, out) + } return nil } diff --git a/pkg/localkube/apiserver.go b/pkg/localkube/apiserver.go index 5ea4d49714a0..6d0c84b45987 100644 --- a/pkg/localkube/apiserver.go +++ b/pkg/localkube/apiserver.go @@ -21,14 +21,16 @@ import ( "path" "strconv" + "k8s.io/minikube/pkg/util" + "github.com/coreos/etcd/embed" apiserveroptions "k8s.io/apiserver/pkg/server/options" "k8s.io/apiserver/pkg/storage/storagebackend" + genericoptions "k8s.io/apiserver/pkg/server/options" apiserver "k8s.io/kubernetes/cmd/kube-apiserver/app" "k8s.io/kubernetes/cmd/kube-apiserver/app/options" - kubeapioptions "k8s.io/kubernetes/pkg/kubeapiserver/options" ) func (lk LocalkubeServer) NewAPIServer() Server { @@ -48,13 +50,7 @@ func StartAPIServer(lk LocalkubeServer) func() error { config.SecureServing.ServerCert.CertKey.CertFile = lk.GetPublicKeyCertPath() config.SecureServing.ServerCert.CertKey.KeyFile = lk.GetPrivateKeyCertPath() - config.Admission.PluginNames = []string{ - "NamespaceLifecycle", - "LimitRanger", - "ServiceAccount", - "DefaultStorageClass", - "ResourceQuota", - } + config.Admission.PluginNames = util.DefaultAdmissionControllers // use localkube etcd config.Etcd.StorageConfig.ServerList = []string{embed.DefaultListenClientURLs} @@ -73,7 +69,7 @@ func StartAPIServer(lk LocalkubeServer) func() error { config.AllowPrivileged = true - config.APIEnablement = &kubeapioptions.APIEnablementOptions{ + config.APIEnablement = &genericoptions.APIEnablementOptions{ RuntimeConfig: lk.RuntimeConfig, } diff --git a/pkg/localkube/controller-manager.go b/pkg/localkube/controller-manager.go index ef642ebaf5d4..735ee1179a24 100644 --- a/pkg/localkube/controller-manager.go +++ b/pkg/localkube/controller-manager.go @@ -18,6 +18,7 @@ package localkube import ( controllerManager "k8s.io/kubernetes/cmd/kube-controller-manager/app" + "k8s.io/kubernetes/cmd/kube-controller-manager/app/config" "k8s.io/kubernetes/cmd/kube-controller-manager/app/options" "k8s.io/minikube/pkg/util" ) @@ -27,24 +28,28 @@ func (lk LocalkubeServer) NewControllerManagerServer() Server { } func 
StartControllerManagerServer(lk LocalkubeServer) func() error { - config := options.NewCMServer() + opts := options.NewKubeControllerManagerOptions() - config.Kubeconfig = util.DefaultKubeConfigPath + opts.Generic.Kubeconfig = util.DefaultKubeConfigPath // defaults from command - config.DeletingPodsQps = 0.1 - config.DeletingPodsBurst = 10 - config.NodeEvictionRate = 0.1 + opts.Generic.ComponentConfig.DeletingPodsQps = 0.1 + opts.Generic.ComponentConfig.DeletingPodsBurst = 10 + opts.Generic.ComponentConfig.NodeEvictionRate = 0.1 - config.EnableProfiling = true - config.VolumeConfiguration.EnableHostPathProvisioning = true - config.VolumeConfiguration.EnableDynamicProvisioning = true - config.ServiceAccountKeyFile = lk.GetPrivateKeyCertPath() - config.RootCAFile = lk.GetCAPublicKeyCertPath() + opts.Generic.ComponentConfig.EnableProfiling = true + opts.Generic.ComponentConfig.VolumeConfiguration.EnableHostPathProvisioning = true + opts.Generic.ComponentConfig.VolumeConfiguration.EnableDynamicProvisioning = true + opts.Generic.ComponentConfig.ServiceAccountKeyFile = lk.GetPrivateKeyCertPath() + opts.Generic.ComponentConfig.RootCAFile = lk.GetCAPublicKeyCertPath() - lk.SetExtraConfigForComponent("controller-manager", &config) + lk.SetExtraConfigForComponent("controller-manager", &opts) + cfg := config.Config{} + if err := opts.ApplyTo(&cfg); err != nil { + panic(err) + } return func() error { - return controllerManager.Run(config) + return controllerManager.Run(cfg.Complete()) } } diff --git a/pkg/localkube/kubelet.go b/pkg/localkube/kubelet.go index 7a7435f2564b..0c81548230b1 100644 --- a/pkg/localkube/kubelet.go +++ b/pkg/localkube/kubelet.go @@ -17,7 +17,6 @@ limitations under the License. package localkube import ( - "k8s.io/apiserver/pkg/util/flag" kubelet "k8s.io/kubernetes/cmd/kubelet/app" "k8s.io/kubernetes/cmd/kubelet/app/options" "k8s.io/minikube/pkg/util" @@ -38,14 +37,13 @@ func StartKubeletServer(lk LocalkubeServer) func() error { } // Master details - config.KubeConfig = flag.NewStringFlag(util.DefaultKubeConfigPath) - config.RequireKubeConfig = true + config.KubeConfig = util.DefaultKubeConfigPath // Set containerized based on the flag config.Containerized = lk.Containerized config.AllowPrivileged = true - config.PodManifestPath = "/etc/kubernetes/manifests" + config.StaticPodPath = "/etc/kubernetes/manifests" // Networking config.ClusterDomain = lk.DNSDomain @@ -54,6 +52,7 @@ func StartKubeletServer(lk LocalkubeServer) func() error { config.PodCIDR = "10.180.1.0/24" config.NodeIP = lk.NodeIP.String() + config.FailSwapOn = false if lk.NetworkPlugin != "" { config.NetworkPluginName = lk.NetworkPlugin diff --git a/pkg/localkube/localkube.go b/pkg/localkube/localkube.go index 3b08b90d602e..d8346d1e0410 100644 --- a/pkg/localkube/localkube.go +++ b/pkg/localkube/localkube.go @@ -26,6 +26,7 @@ import ( "net/http" "path" "strconv" + "strings" "github.com/golang/glog" "github.com/pkg/errors" @@ -179,6 +180,27 @@ func (lk LocalkubeServer) SetExtraConfigForComponent(component string, config in } } +func (lk LocalkubeServer) GetFeatureGates() (map[string]bool, error) { + fg := map[string]bool{} + if lk.FeatureGates == "" { + return fg, nil + } + gates := strings.Split(lk.FeatureGates, ",") + for _, g := range gates { + + kvp := strings.SplitN(g, "=", 2) + if len(kvp) != 2 { + return nil, fmt.Errorf("invalid feature gate specification: %s", g) + } + value, err := strconv.ParseBool(kvp[1]) + if err != nil { + return nil, fmt.Errorf("invalid feature gate specification: %s", g) + } + 
fg[kvp[0]] = value + } + return fg, nil +} + func (lk LocalkubeServer) loadCert(path string) (*x509.Certificate, error) { contents, err := ioutil.ReadFile(path) if err != nil { diff --git a/pkg/localkube/proxy.go b/pkg/localkube/proxy.go index e617ad79a11b..3b53f84fe087 100644 --- a/pkg/localkube/proxy.go +++ b/pkg/localkube/proxy.go @@ -43,6 +43,10 @@ func StartProxyServer(lk LocalkubeServer) func() error { } opts := kubeproxy.NewOptions() + fg, err := lk.GetFeatureGates() + if err != nil { + panic(err) + } config := &kubeproxyconfig.KubeProxyConfiguration{ OOMScoreAdj: &OOMScoreAdj, ClientConnection: kubeproxyconfig.ClientConnectionConfiguration{ @@ -58,17 +62,15 @@ func StartProxyServer(lk LocalkubeServer) func() error { }, BindAddress: bindaddress, Mode: kubeproxyconfig.ProxyModeIPTables, - FeatureGates: lk.FeatureGates, + FeatureGates: fg, // Disable the healthz check - HealthzBindAddress: "0", + HealthzBindAddress: "", } - _, err := opts.ApplyDefaults(config) - if err != nil { + if _, err := opts.ApplyDefaults(config); err != nil { panic(err) } - opts.SetConfig(config) - lk.SetExtraConfigForComponent("proxy", &config) + opts.SetConfig(config) return func() error { return opts.Run() diff --git a/pkg/localkube/scheduler.go b/pkg/localkube/scheduler.go index 6fda922593d6..59616dfc55fb 100644 --- a/pkg/localkube/scheduler.go +++ b/pkg/localkube/scheduler.go @@ -17,8 +17,8 @@ limitations under the License. package localkube import ( + scheduler "k8s.io/kubernetes/cmd/kube-scheduler/app" "k8s.io/kubernetes/pkg/apis/componentconfig" - scheduler "k8s.io/kubernetes/plugin/cmd/kube-scheduler/app" "k8s.io/minikube/pkg/util" ) diff --git a/pkg/localkube/storage_provisioner.go b/pkg/localkube/storage_provisioner.go index a2baba170b74..343ad8a5deb5 100644 --- a/pkg/localkube/storage_provisioner.go +++ b/pkg/localkube/storage_provisioner.go @@ -25,7 +25,7 @@ import ( "github.com/pkg/errors" "github.com/r2d4/external-storage/lib/controller" "k8s.io/api/core/v1" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/wait" @@ -67,7 +67,7 @@ func (p *hostPathProvisioner) Provision(options controller.VolumeOptions) (*v1.P } pv := &v1.PersistentVolume{ - ObjectMeta: meta_v1.ObjectMeta{ + ObjectMeta: metav1.ObjectMeta{ Name: options.PVName, Annotations: map[string]string{ "hostPathProvisionerIdentity": string(p.identity), diff --git a/pkg/minikube/assets/addons.go b/pkg/minikube/assets/addons.go index abc7850307cc..b1c7800eca85 100644 --- a/pkg/minikube/assets/addons.go +++ b/pkg/minikube/assets/addons.go @@ -214,6 +214,23 @@ var Addons = map[string]*Addon{ "ingress-svc.yaml", "0640"), }, false, "ingress"), + "metrics-server": NewAddon([]*BinDataAsset{ + NewBinDataAsset( + "deploy/addons/metrics-server/metrics-apiservice.yaml", + constants.AddonsPath, + "metrics-apiservice.yaml", + "0640"), + NewBinDataAsset( + "deploy/addons/metrics-server/metrics-server-deployment.yaml", + constants.AddonsPath, + "metrics-server-deployment.yaml", + "0640"), + NewBinDataAsset( + "deploy/addons/metrics-server/metrics-server-service.yaml", + constants.AddonsPath, + "metrics-server-service.yaml", + "0640"), + }, false, "metrics-server"), "registry": NewAddon([]*BinDataAsset{ NewBinDataAsset( "deploy/addons/registry/registry-rc.yaml", diff --git a/pkg/minikube/bootstrapper/bootstrapper.go b/pkg/minikube/bootstrapper/bootstrapper.go index ffd3d6667177..168c1f54481b 100644 --- 
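
GetFeatureGates exists because the KubeProxyConfiguration in this diff now takes FeatureGates as a map[string]bool rather than the comma-separated string minikube stores. A self-contained sketch of the same parse logic, with the function pulled out standalone so the accepted inputs are concrete (the helper name is illustrative):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseFeatureGates mirrors LocalkubeServer.GetFeatureGates: split the spec
// on commas, then each entry on the first "=", and parse the value as a bool.
func parseFeatureGates(spec string) (map[string]bool, error) {
	fg := map[string]bool{}
	if spec == "" {
		return fg, nil
	}
	for _, g := range strings.Split(spec, ",") {
		kvp := strings.SplitN(g, "=", 2)
		if len(kvp) != 2 {
			return nil, fmt.Errorf("invalid feature gate specification: %s", g)
		}
		value, err := strconv.ParseBool(kvp[1])
		if err != nil {
			return nil, fmt.Errorf("invalid feature gate specification: %s", g)
		}
		fg[kvp[0]] = value
	}
	return fg, nil
}

func main() {
	fg, err := parseFeatureGates("HugePages=true,OtherFeature=false")
	fmt.Println(fg, err) // map[HugePages:true OtherFeature:false] <nil>
}
```
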
a/pkg/minikube/bootstrapper/bootstrapper.go +++ b/pkg/minikube/bootstrapper/bootstrapper.go @@ -17,42 +17,22 @@ limitations under the License. package bootstrapper import ( - "net" + "io" + "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/constants" - "k8s.io/minikube/pkg/util" ) // Bootstrapper contains all the methods needed to bootstrap a kubernetes cluster type Bootstrapper interface { - StartCluster(KubernetesConfig) error - UpdateCluster(KubernetesConfig) error - RestartCluster(KubernetesConfig) error - GetClusterLogs(follow bool) (string, error) - SetupCerts(cfg KubernetesConfig) error + StartCluster(config.KubernetesConfig) error + UpdateCluster(config.KubernetesConfig) error + RestartCluster(config.KubernetesConfig) error + GetClusterLogsTo(follow bool, out io.Writer) error + SetupCerts(cfg config.KubernetesConfig) error GetClusterStatus() (string, error) } -// KubernetesConfig contains the parameters used to configure the VM Kubernetes. -type KubernetesConfig struct { - KubernetesVersion string - NodeIP string - NodeName string - APIServerName string - APIServerNames []string - APIServerIPs []net.IP - DNSDomain string - ContainerRuntime string - NetworkPlugin string - FeatureGates string - ServiceCIDR string - ExtraOptions util.ExtraOptionSlice - - ShouldLoadCachedImages bool - - BootstrapToken string -} - const ( BootstrapperTypeLocalkube = "localkube" BootstrapperTypeKubeadm = "kubeadm" diff --git a/pkg/minikube/bootstrapper/certs.go b/pkg/minikube/bootstrapper/certs.go index 6b9842c11cd0..4bcaae7c9a79 100644 --- a/pkg/minikube/bootstrapper/certs.go +++ b/pkg/minikube/bootstrapper/certs.go @@ -29,6 +29,7 @@ import ( "k8s.io/client-go/tools/clientcmd/api/latest" "k8s.io/minikube/pkg/minikube/assets" "k8s.io/minikube/pkg/minikube/bootstrapper/runner" + "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/util" "k8s.io/minikube/pkg/util/kubeconfig" @@ -42,7 +43,7 @@ var ( ) // SetupCerts gets the generated credentials required to talk to the APIServer. 
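
The interface change above is motivated by streaming: GetClusterLogs returned a string, which cannot represent a `journalctl -f` stream that never terminates, so GetClusterLogsTo writes through to an io.Writer instead. A hedged sketch of a caller under the new signature (wiring up a concrete bootstrapper is omitted):

```go
package main

import (
	"os"

	"k8s.io/minikube/pkg/minikube/bootstrapper"
)

// streamLogs writes cluster logs straight to the terminal. With follow=true
// the call only returns on error or interrupt, which is exactly the case the
// old string-returning signature could not express.
func streamLogs(b bootstrapper.Bootstrapper, follow bool) error {
	return b.GetClusterLogsTo(follow, os.Stdout)
}

func main() {} // obtaining a concrete Bootstrapper is outside this sketch
```
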
-func SetupCerts(cmd runner.CommandRunner, k8s KubernetesConfig) error { +func SetupCerts(cmd runner.CommandRunner, k8s config.KubernetesConfig) error { localPath := constants.GetMinipath() glog.Infof("Setting up certificates for IP: %s\n", k8s.NodeIP) @@ -93,7 +94,7 @@ func SetupCerts(cmd runner.CommandRunner, k8s KubernetesConfig) error { return nil } -func generateCerts(k8s KubernetesConfig) error { +func generateCerts(k8s config.KubernetesConfig) error { serviceIP, err := util.GetServiceClusterIP(k8s.ServiceCIDR) if err != nil { return errors.Wrap(err, "getting service cluster ip") diff --git a/pkg/minikube/bootstrapper/certs_test.go b/pkg/minikube/bootstrapper/certs_test.go index 1fa9ccabde80..79b3023a2ba8 100644 --- a/pkg/minikube/bootstrapper/certs_test.go +++ b/pkg/minikube/bootstrapper/certs_test.go @@ -22,6 +22,7 @@ import ( "testing" "k8s.io/minikube/pkg/minikube/bootstrapper/runner" + "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/tests" "k8s.io/minikube/pkg/util" @@ -32,7 +33,7 @@ func TestSetupCerts(t *testing.T) { defer os.RemoveAll(tempDir) f := runner.NewFakeCommandRunner() - k8s := KubernetesConfig{ + k8s := config.KubernetesConfig{ APIServerName: constants.APIServerName, DNSDomain: constants.ClusterDNSDomain, ServiceCIDR: util.DefaultServiceCIDR, diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 60996058b0df..3e3f66954412 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -20,6 +20,7 @@ import ( "bytes" "crypto" "fmt" + "io" "os" "path" "strings" @@ -104,7 +105,7 @@ func (k *KubeadmBootstrapper) GetClusterStatus() (string, error) { // TODO(r2d4): Should this aggregate all the logs from the control plane? // Maybe subcommands for each component? minikube logs apiserver? -func (k *KubeadmBootstrapper) GetClusterLogs(follow bool) (string, error) { +func (k *KubeadmBootstrapper) GetClusterLogsTo(follow bool, out io.Writer) error { var flags []string if follow { flags = append(flags, "-f") @@ -112,35 +113,49 @@ func (k *KubeadmBootstrapper) GetClusterLogs(follow bool) (string, error) { logsCommand := fmt.Sprintf("sudo journalctl %s -u kubelet", strings.Join(flags, " ")) if follow { - if err := k.c.Run(logsCommand); err != nil { - return "", errors.Wrap(err, "getting shell") + if err := k.c.CombinedOutputTo(logsCommand, out); err != nil { + return errors.Wrap(err, "getting cluster logs") } - } + } else { - logs, err := k.c.CombinedOutput(logsCommand) - if err != nil { - return "", errors.Wrap(err, "getting cluster logs") + logs, err := k.c.CombinedOutput(logsCommand) + if err != nil { + return errors.Wrap(err, "getting cluster logs") + } + fmt.Fprint(out, logs) } - - return logs, nil + return nil } -func (k *KubeadmBootstrapper) StartCluster(k8s bootstrapper.KubernetesConfig) error { - // We use --skip-preflight-checks since we have our own custom addons +func (k *KubeadmBootstrapper) StartCluster(k8s config.KubernetesConfig) error { + // We use --ignore-preflight-errors=DirAvailable since we have our own custom addons // that we also stick in /etc/kubernetes/manifests + // We use --ignore-preflight-errors=Swap since minikube.iso allocates a swap partition. + // (it should probably stop doing this, though...) + // We use --ignore-preflight-errors=CRI since /var/run/dockershim.sock is not present. 
+ // (because we start kubelet with an invalid config) b := bytes.Buffer{} - if err := kubeadmInitTemplate.Execute(&b, struct{ KubeadmConfigFile, Token string }{constants.KubeadmConfigFile, k8s.BootstrapToken}); err != nil { + templateContext := struct { + KubeadmConfigFile string + Token string + Preflights []string + }{ + KubeadmConfigFile: constants.KubeadmConfigFile, + Token: k8s.BootstrapToken, + Preflights: constants.Preflights, + } + if err := kubeadmInitTemplate.Execute(&b, templateContext); err != nil { return err } - err := k.c.Run(b.String()) + out, err := k.c.CombinedOutput(b.String()) if err != nil { - return errors.Wrapf(err, "kubeadm init error running command: %s", b.String()) + return errors.Wrapf(err, "kubeadm init error %s running command: %s", b.String(), out) } //TODO(r2d4): get rid of global here master = k8s.NodeName - if err := util.RetryAfter(100, unmarkMaster, time.Millisecond*500); err != nil { + if err := util.RetryAfter(200, unmarkMaster, time.Second*1); err != nil { return errors.Wrap(err, "timed out waiting to unmark master") } @@ -151,7 +166,7 @@ func (k *KubeadmBootstrapper) StartCluster(k8s bootstrapper.KubernetesConfig) er return nil } -func (k *KubeadmBootstrapper) JoinNode(k8s bootstrapper.KubernetesConfig) error { +func (k *KubeadmBootstrapper) JoinNode(k8s config.KubernetesConfig) error { // We use --skip-preflight-checks since we have our own custom addons // that we also stick in /etc/kubernetes/manifests b := bytes.Buffer{} @@ -202,7 +217,7 @@ func addAddons(files *[]assets.CopyableFile) error { return nil } -func (k *KubeadmBootstrapper) RestartCluster(k8s bootstrapper.KubernetesConfig) error { +func (k *KubeadmBootstrapper) RestartCluster(k8s config.KubernetesConfig) error { opts := struct { KubeadmConfigFile string }{ @@ -225,7 +240,7 @@ func (k *KubeadmBootstrapper) RestartCluster(k8s bootstrapper.KubernetesConfig) return nil } -func (k *KubeadmBootstrapper) SetupCerts(k8s bootstrapper.KubernetesConfig) error { +func (k *KubeadmBootstrapper) SetupCerts(k8s config.KubernetesConfig) error { return bootstrapper.SetupCerts(k.c, k8s) } @@ -258,7 +273,7 @@ func SetContainerRuntime(cfg map[string]string, runtime string) map[string]strin // NewKubeletConfig generates a new systemd unit containing a configured kubelet // based on the options present in the KubernetesConfig. 
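
To make the new init invocation concrete, here is the template from templates.go executed against a trimmed Preflights list. The subset below is illustrative only; the full list is defined in constants.go further down in this diff:

```go
package main

import (
	"os"
	"text/template"
)

// Identical to the kubeadmInitTemplate added in templates.go.
var kubeadmInitTemplate = template.Must(template.New("kubeadmInitTemplate").Parse(
	"sudo /usr/bin/kubeadm init --config {{.KubeadmConfigFile}} {{range .Preflights}}--ignore-preflight-errors={{.}} {{end}}"))

func main() {
	// Prints (plus a trailing space from the range clause):
	// sudo /usr/bin/kubeadm init --config /var/lib/kubeadm.yaml --ignore-preflight-errors=DirAvailable--data --ignore-preflight-errors=Swap
	ctx := struct {
		KubeadmConfigFile string
		Preflights        []string
	}{"/var/lib/kubeadm.yaml", []string{"DirAvailable--data", "Swap"}}
	if err := kubeadmInitTemplate.Execute(os.Stdout, ctx); err != nil {
		panic(err)
	}
}
```
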
-func NewKubeletConfig(hostname, ip string, k8s bootstrapper.KubernetesConfig) (string, error) { +func NewKubeletConfig(hostname, ip string, k8s config.KubernetesConfig) (string, error) { version, err := ParseKubernetesVersion(k8s.KubernetesVersion) if err != nil { return "", errors.Wrap(err, "parsing kubernetes version") @@ -292,7 +307,7 @@ func NewKubeletConfig(hostname, ip string, k8s bootstrapper.KubernetesConfig) (s return b.String(), nil } -func (k *KubeadmBootstrapper) UpdateCluster(cfg bootstrapper.KubernetesConfig) error { +func (k *KubeadmBootstrapper) UpdateCluster(cfg config.KubernetesConfig) error { if cfg.ShouldLoadCachedImages { // Make best effort to load any cached images glog.Infoln("Loading cached images....") @@ -361,7 +376,7 @@ sudo systemctl start kubelet return nil } -func (k *KubeadmBootstrapper) UpdateNode(cfg bootstrapper.KubernetesConfig) error { +func (k *KubeadmBootstrapper) UpdateNode(cfg config.KubernetesConfig) error { kubeletCfg, err := NewKubeletConfig(k.machineName, k.ip, cfg) if err != nil { return errors.Wrap(err, "generating kubelet config") @@ -403,7 +418,7 @@ func (k *KubeadmBootstrapper) UpdateNode(cfg bootstrapper.KubernetesConfig) erro return nil } -func generateConfig(k8s bootstrapper.KubernetesConfig) (string, error) { +func generateConfig(k8s config.KubernetesConfig) (string, error) { version, err := ParseKubernetesVersion(k8s.KubernetesVersion) if err != nil { return "", errors.Wrap(err, "parsing kubernetes version") diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm_test.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm_test.go index d9093009f222..11a9a3e3a744 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm_test.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm_test.go @@ -19,22 +19,22 @@ package kubeadm import ( "testing" - "k8s.io/minikube/pkg/minikube/bootstrapper" + "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/util" ) func TestGenerateConfig(t *testing.T) { tests := []struct { description string - cfg bootstrapper.KubernetesConfig + cfg config.KubernetesConfig expectedCfg string shouldErr bool }{ { description: "no extra args", - cfg: bootstrapper.KubernetesConfig{ + cfg: config.KubernetesConfig{ NodeIP: "192.168.1.100", - KubernetesVersion: "v1.8.0", + KubernetesVersion: "v1.10.0", NodeName: "minikube", }, expectedCfg: `apiVersion: kubeadm.k8s.io/v1alpha1 @@ -42,7 +42,7 @@ kind: MasterConfiguration api: advertiseAddress: 192.168.1.100 bindPort: 8443 -kubernetesVersion: v1.8.0 +kubernetesVersion: v1.10.0 certificatesDir: /var/lib/localkube/certs/ networking: serviceSubnet: 10.96.0.0/12 @@ -50,13 +50,15 @@ networking: etcd: dataDir: /data nodeName: minikube +apiServerExtraArgs: + admission-control: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" `, }, { description: "extra args all components", - cfg: bootstrapper.KubernetesConfig{ + cfg: config.KubernetesConfig{ NodeIP: "192.168.1.101", - KubernetesVersion: "v1.8.0-alpha.0", + KubernetesVersion: "v1.10.0-alpha.0", NodeName: "extra-args-minikube", ExtraOptions: util.ExtraOptionSlice{ util.ExtraOption{ @@ -81,7 +83,7 @@ kind: MasterConfiguration api: advertiseAddress: 192.168.1.101 bindPort: 8443 -kubernetesVersion: v1.8.0-alpha.0 +kubernetesVersion: v1.10.0-alpha.0 certificatesDir: /var/lib/localkube/certs/ networking: serviceSubnet: 10.96.0.0/12 @@ -90,6 +92,7 @@ etcd: dataDir: /data nodeName: extra-args-minikube 
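
Both bootstrappers now draw the same admission-controller set from util.DefaultAdmissionControllers: localkube assigns it to Admission.PluginNames, and kubeadm joins it into the `admission-control` value asserted by these expected configs. A sketch of that join, with the slice copied from the test expectations:

```go
package main

import (
	"fmt"
	"strings"
)

// Mirrors util.DefaultAdmissionControllers as asserted by the test fixtures.
var defaultAdmissionControllers = []string{
	"Initializers", "NamespaceLifecycle", "LimitRanger", "ServiceAccount",
	"DefaultStorageClass", "DefaultTolerationSeconds", "NodeRestriction",
	"MutatingAdmissionWebhook", "ValidatingAdmissionWebhook", "ResourceQuota",
}

func main() {
	// Matches the admission-control line in the generated kubeadm config.
	fmt.Println(strings.Join(defaultAdmissionControllers, ","))
}
```
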
apiServerExtraArgs: + admission-control: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" fail-no-swap: "true" controllerManagerExtraArgs: kube-api-burst: "32" @@ -99,9 +102,9 @@ schedulerExtraArgs: }, { description: "two extra args for one component", - cfg: bootstrapper.KubernetesConfig{ + cfg: config.KubernetesConfig{ NodeIP: "192.168.1.101", - KubernetesVersion: "v1.8.0-alpha.0", + KubernetesVersion: "v1.10.0-alpha.0", NodeName: "extra-args-minikube", ExtraOptions: util.ExtraOptionSlice{ util.ExtraOption{ @@ -121,7 +124,7 @@ kind: MasterConfiguration api: advertiseAddress: 192.168.1.101 bindPort: 8443 -kubernetesVersion: v1.8.0-alpha.0 +kubernetesVersion: v1.10.0-alpha.0 certificatesDir: /var/lib/localkube/certs/ networking: serviceSubnet: 10.96.0.0/12 @@ -130,15 +133,16 @@ etcd: dataDir: /data nodeName: extra-args-minikube apiServerExtraArgs: + admission-control: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" fail-no-swap: "true" kube-api-burst: "32" `, }, { description: "enable feature gates", - cfg: bootstrapper.KubernetesConfig{ + cfg: config.KubernetesConfig{ NodeIP: "192.168.1.101", - KubernetesVersion: "v1.8.0-alpha.0", + KubernetesVersion: "v1.10.0-alpha.0", NodeName: "extra-args-minikube", FeatureGates: "HugePages=true,OtherFeature=false", }, @@ -147,7 +151,7 @@ kind: MasterConfiguration api: advertiseAddress: 192.168.1.101 bindPort: 8443 -kubernetesVersion: v1.8.0-alpha.0 +kubernetesVersion: v1.10.0-alpha.0 certificatesDir: /var/lib/localkube/certs/ networking: serviceSubnet: 10.96.0.0/12 @@ -156,6 +160,7 @@ etcd: dataDir: /data nodeName: extra-args-minikube apiServerExtraArgs: + admission-control: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" feature-gates: "HugePages=true,OtherFeature=false" controllerManagerExtraArgs: feature-gates: "HugePages=true,OtherFeature=false" @@ -165,9 +170,9 @@ schedulerExtraArgs: }, { description: "enable feature gates and extra config", - cfg: bootstrapper.KubernetesConfig{ + cfg: config.KubernetesConfig{ NodeIP: "192.168.1.101", - KubernetesVersion: "v1.8.0-alpha.0", + KubernetesVersion: "v1.10.0-alpha.0", NodeName: "extra-args-minikube", FeatureGates: "HugePages=true,OtherFeature=false", ExtraOptions: util.ExtraOptionSlice{ @@ -183,7 +188,7 @@ kind: MasterConfiguration api: advertiseAddress: 192.168.1.101 bindPort: 8443 -kubernetesVersion: v1.8.0-alpha.0 +kubernetesVersion: v1.10.0-alpha.0 certificatesDir: /var/lib/localkube/certs/ networking: serviceSubnet: 10.96.0.0/12 @@ -192,6 +197,7 @@ etcd: dataDir: /data nodeName: extra-args-minikube apiServerExtraArgs: + admission-control: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" fail-no-swap: "true" feature-gates: "HugePages=true,OtherFeature=false" controllerManagerExtraArgs: @@ -203,7 +209,7 @@ schedulerExtraArgs: { // Unknown components should fail silently description: "unknown component", - cfg: bootstrapper.KubernetesConfig{ + cfg: config.KubernetesConfig{ NodeIP: "192.168.1.101", KubernetesVersion: "v1.8.0-alpha.0", NodeName: 
"extra-args-minikube", diff --git a/pkg/minikube/bootstrapper/kubeadm/templates.go b/pkg/minikube/bootstrapper/kubeadm/templates.go index 1baae390cf64..ec568e0ed75c 100644 --- a/pkg/minikube/bootstrapper/kubeadm/templates.go +++ b/pkg/minikube/bootstrapper/kubeadm/templates.go @@ -73,7 +73,8 @@ sudo /usr/bin/kubeadm alpha phase controlplane all --config {{.KubeadmConfigFile sudo /usr/bin/kubeadm alpha phase etcd local --config {{.KubeadmConfigFile}} `)) -var kubeadmInitTemplate = template.Must(template.New("kubeadmInitTemplate").Parse("sudo /usr/bin/kubeadm init --config {{.KubeadmConfigFile}} --skip-preflight-checks")) +var kubeadmInitTemplate = template.Must(template.New("kubeadmInitTemplate").Parse( + "sudo /usr/bin/kubeadm init --config {{.KubeadmConfigFile}} {{range .Preflights}}--ignore-preflight-errors={{.}} {{end}}")) var kubeadmJoinTemplate = template.Must(template.New("kubeadmJoinTemplate").Parse("sudo /usr/bin/kubeadm join --token {{.Token}} {{.ServerAddress}}")) diff --git a/pkg/minikube/bootstrapper/kubeadm/util.go b/pkg/minikube/bootstrapper/kubeadm/util.go index 9ee506a5860c..8b457bbc2ec4 100644 --- a/pkg/minikube/bootstrapper/kubeadm/util.go +++ b/pkg/minikube/bootstrapper/kubeadm/util.go @@ -32,7 +32,7 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/strategicpatch" - "k8s.io/minikube/pkg/minikube/bootstrapper" + "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/service" "k8s.io/minikube/pkg/util" ) @@ -146,7 +146,7 @@ users: ` ) -func restartKubeProxy(k8s bootstrapper.KubernetesConfig) error { +func restartKubeProxy(k8s config.KubernetesConfig) error { client, err := util.GetClient() if err != nil { return errors.Wrap(err, "getting k8s client") diff --git a/pkg/minikube/bootstrapper/kubeadm/versions.go b/pkg/minikube/bootstrapper/kubeadm/versions.go index 7c3595feeaa4..f2b31cde22b6 100644 --- a/pkg/minikube/bootstrapper/kubeadm/versions.go +++ b/pkg/minikube/bootstrapper/kubeadm/versions.go @@ -173,7 +173,15 @@ var versionSpecificOpts = []VersionedExtraOption{ // Kubeconfig args NewUnversionedOption(Kubelet, "kubeconfig", "/etc/kubernetes/kubelet.conf"), NewUnversionedOption(Kubelet, "bootstrap-kubeconfig", "/etc/kubernetes/bootstrap-kubelet.conf"), - NewUnversionedOption(Kubelet, "require-kubeconfig", "true"), + { + Option: util.ExtraOption{ + Component: Apiserver, + Key: "require-kubeconfig", + Value: "true", + }, + LessThanOrEqual: semver.MustParse("1.9.10"), + }, + NewUnversionedOption(Kubelet, "hostname-override", "minikube"), // System pods args NewUnversionedOption(Kubelet, "pod-manifest-path", "/etc/kubernetes/manifests"), @@ -191,6 +199,14 @@ var versionSpecificOpts = []VersionedExtraOption{ // Cgroup args NewUnversionedOption(Kubelet, "cadvisor-port", "0"), NewUnversionedOption(Kubelet, "cgroup-driver", "cgroupfs"), + { + Option: util.ExtraOption{ + Component: Apiserver, + Key: "admission-control", + Value: strings.Join(util.DefaultAdmissionControllers, ","), + }, + GreaterThanOrEqual: semver.MustParse("1.9.0-alpha.0"), + }, } func VersionIsBetween(version, gte, lte semver.Version) bool { diff --git a/pkg/minikube/bootstrapper/kubeadm/worker.go b/pkg/minikube/bootstrapper/kubeadm/worker.go index abcc25c2da8b..be54edfd0c55 100644 --- a/pkg/minikube/bootstrapper/kubeadm/worker.go +++ b/pkg/minikube/bootstrapper/kubeadm/worker.go @@ -6,15 +6,15 @@ import ( "github.com/pkg/errors" "k8s.io/minikube/pkg/minikube" - "k8s.io/minikube/pkg/minikube/bootstrapper" + cfg 
"k8s.io/minikube/pkg/minikube/config" ) type WorkerBootstrapper struct { - config bootstrapper.KubernetesConfig + config cfg.KubernetesConfig ui io.Writer } -func NewWorkerBootstrapper(c bootstrapper.KubernetesConfig, ui io.Writer) minikube.Bootstrapper { +func NewWorkerBootstrapper(c cfg.KubernetesConfig, ui io.Writer) minikube.Bootstrapper { return &WorkerBootstrapper{config: c, ui: ui} } diff --git a/pkg/minikube/bootstrapper/localkube/commands.go b/pkg/minikube/bootstrapper/localkube/commands.go index 570680b8223c..f7b40cf249f2 100644 --- a/pkg/minikube/bootstrapper/localkube/commands.go +++ b/pkg/minikube/bootstrapper/localkube/commands.go @@ -21,10 +21,9 @@ import ( gflag "flag" "fmt" "strings" - "text/template" - "k8s.io/minikube/pkg/minikube/bootstrapper" + "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/constants" ) @@ -67,7 +66,7 @@ else fi ` -func GetStartCommand(kubernetesConfig bootstrapper.KubernetesConfig) (string, error) { +func GetStartCommand(kubernetesConfig config.KubernetesConfig) (string, error) { localkubeStartCommand, err := GenLocalkubeStartCmd(kubernetesConfig) if err != nil { return "", err @@ -95,7 +94,7 @@ func GetStartCommand(kubernetesConfig bootstrapper.KubernetesConfig) (string, er return buf.String(), nil } -func GetStartCommandNoSystemd(kubernetesConfig bootstrapper.KubernetesConfig, localkubeStartCmd string) (string, error) { +func GetStartCommandNoSystemd(kubernetesConfig config.KubernetesConfig, localkubeStartCmd string) (string, error) { t := template.Must(template.New("startCommand").Parse(startCommandNoSystemdTemplate)) buf := bytes.Buffer{} data := struct { @@ -115,7 +114,7 @@ func GetStartCommandNoSystemd(kubernetesConfig bootstrapper.KubernetesConfig, lo return buf.String(), nil } -func GetStartCommandSystemd(kubernetesConfig bootstrapper.KubernetesConfig, localkubeStartCmd string) (string, error) { +func GetStartCommandSystemd(kubernetesConfig config.KubernetesConfig, localkubeStartCmd string) (string, error) { t, err := template.New("localkubeConfig").Parse(localkubeSystemdTmpl) if err != nil { return "", err @@ -133,7 +132,7 @@ func GetStartCommandSystemd(kubernetesConfig bootstrapper.KubernetesConfig, loca constants.LocalkubeServicePath), nil } -func GenLocalkubeStartCmd(kubernetesConfig bootstrapper.KubernetesConfig) (string, error) { +func GenLocalkubeStartCmd(kubernetesConfig config.KubernetesConfig) (string, error) { flagVals := make([]string, len(constants.LogFlags)) for _, logFlag := range constants.LogFlags { if logVal := gflag.Lookup(logFlag); logVal != nil && logVal.Value.String() != logVal.DefValue { diff --git a/pkg/minikube/bootstrapper/localkube/commands_test.go b/pkg/minikube/bootstrapper/localkube/commands_test.go index 5b19c103682c..1e12493fb94e 100644 --- a/pkg/minikube/bootstrapper/localkube/commands_test.go +++ b/pkg/minikube/bootstrapper/localkube/commands_test.go @@ -22,7 +22,7 @@ import ( "strings" "testing" - "k8s.io/minikube/pkg/minikube/bootstrapper" + "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/util" ) @@ -32,7 +32,7 @@ func TestGetStartCommandCustomValues(t *testing.T) { "vmodule": "cluster*=5", } flagMapToSetFlags(flagMap) - startCommand, err := GetStartCommand(bootstrapper.KubernetesConfig{}) + startCommand, err := GetStartCommand(config.KubernetesConfig{}) if err != nil { t.Fatalf("Error generating start command: %s", err) } @@ -47,7 +47,7 @@ func TestGetStartCommandCustomValues(t *testing.T) { } func TestGetStartCommandExtraOptions(t *testing.T) { - k := 
bootstrapper.KubernetesConfig{ + k := config.KubernetesConfig{ ExtraOptions: util.ExtraOptionSlice{ util.ExtraOption{Component: "a", Key: "b", Value: "c"}, util.ExtraOption{Component: "d", Key: "e.f", Value: "g"}, diff --git a/pkg/minikube/bootstrapper/localkube/localkube.go b/pkg/minikube/bootstrapper/localkube/localkube.go index a2f948d25d4f..de3a840e9208 100644 --- a/pkg/minikube/bootstrapper/localkube/localkube.go +++ b/pkg/minikube/bootstrapper/localkube/localkube.go @@ -18,6 +18,7 @@ package localkube import ( "fmt" + "io" "strings" "k8s.io/minikube/pkg/minikube/assets" @@ -58,19 +59,27 @@ func NewLocalkubeBootstrapper(api libmachine.API) (*LocalkubeBootstrapper, error }, nil } -// GetClusterLogs If follow is specified, it will tail the logs -func (lk *LocalkubeBootstrapper) GetClusterLogs(follow bool) (string, error) { +// GetClusterLogs +// If follow is specified, it will tail the logs +func (lk *LocalkubeBootstrapper) GetClusterLogsTo(follow bool, out io.Writer) error { logsCommand, err := GetLogsCommand(follow) if err != nil { - return "", errors.Wrap(err, "Error getting logs command") + return errors.Wrap(err, "Error getting logs command") } - logs, err := lk.cmd.CombinedOutput(logsCommand) - if err != nil { - return "", errors.Wrap(err, "getting cluster logs") + if follow { + err = lk.cmd.CombinedOutputTo(logsCommand, out) + if err != nil { + return errors.Wrap(err, "getting cluster logs") + } + } else { + logs, err := lk.cmd.CombinedOutput(logsCommand) + if err != nil { + return errors.Wrap(err, "getting cluster logs") + } + fmt.Fprint(out, logs) } - - return logs, nil + return nil } // GetClusterStatus gets the status of localkube from the host VM. @@ -90,7 +99,7 @@ func (lk *LocalkubeBootstrapper) GetClusterStatus() (string, error) { } // StartCluster starts a k8s cluster on the specified Host. 
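
The follow/no-follow branching in GetClusterLogsTo above is the recurring pattern introduced with the writer-based runner API: stream when the command never exits, buffer when it does. Condensed into one hedged helper (the name is illustrative), exercised here with the FakeCommandRunner these tests already use:

```go
package main

import (
	"fmt"
	"io"
	"os"

	"k8s.io/minikube/pkg/minikube/bootstrapper/runner"
)

// logsTo streams when follow is set (journalctl -f never returns), and
// buffers then prints otherwise.
func logsTo(cmd runner.CommandRunner, logsCommand string, follow bool, out io.Writer) error {
	if follow {
		return cmd.CombinedOutputTo(logsCommand, out)
	}
	logs, err := cmd.CombinedOutput(logsCommand)
	if err != nil {
		return err
	}
	_, err = fmt.Fprint(out, logs)
	return err
}

func main() {
	f := runner.NewFakeCommandRunner()
	f.SetCommandToOutput(map[string]string{"logs": "fake journal output\n"})
	if err := logsTo(f, "logs", false, os.Stdout); err != nil {
		panic(err)
	}
}
```
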
-func (lk *LocalkubeBootstrapper) StartCluster(kubernetesConfig bootstrapper.KubernetesConfig) error { +func (lk *LocalkubeBootstrapper) StartCluster(kubernetesConfig config.KubernetesConfig) error { startCommand, err := GetStartCommand(kubernetesConfig) if err != nil { return errors.Wrapf(err, "Error generating start command: %s", err) @@ -102,11 +111,11 @@ func (lk *LocalkubeBootstrapper) StartCluster(kubernetesConfig bootstrapper.Kube return nil } -func (lk *LocalkubeBootstrapper) RestartCluster(kubernetesConfig bootstrapper.KubernetesConfig) error { +func (lk *LocalkubeBootstrapper) RestartCluster(kubernetesConfig config.KubernetesConfig) error { return lk.StartCluster(kubernetesConfig) } -func (lk *LocalkubeBootstrapper) UpdateCluster(config bootstrapper.KubernetesConfig) error { +func (lk *LocalkubeBootstrapper) UpdateCluster(config config.KubernetesConfig) error { if config.ShouldLoadCachedImages { // Make best effort to load any cached images go machine.LoadImages(lk.cmd, constants.LocalkubeCachedImages, constants.ImageCacheDir) @@ -147,6 +156,6 @@ func (lk *LocalkubeBootstrapper) UpdateCluster(config bootstrapper.KubernetesCon return nil } -func (lk *LocalkubeBootstrapper) SetupCerts(k8s bootstrapper.KubernetesConfig) error { +func (lk *LocalkubeBootstrapper) SetupCerts(k8s config.KubernetesConfig) error { return bootstrapper.SetupCerts(lk.cmd, k8s) } diff --git a/pkg/minikube/bootstrapper/localkube/localkube_caching.go b/pkg/minikube/bootstrapper/localkube/localkube_caching.go index a84b450dff12..a29a95922161 100644 --- a/pkg/minikube/bootstrapper/localkube/localkube_caching.go +++ b/pkg/minikube/bootstrapper/localkube/localkube_caching.go @@ -32,14 +32,14 @@ import ( "github.com/pkg/errors" "k8s.io/minikube/pkg/minikube/assets" - "k8s.io/minikube/pkg/minikube/bootstrapper" + "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/util" ) // localkubeCacher is a struct with methods designed for caching localkube type localkubeCacher struct { - k8sConf bootstrapper.KubernetesConfig + k8sConf config.KubernetesConfig } func (l *localkubeCacher) getLocalkubeCacheFilepath() string { @@ -51,7 +51,7 @@ func (l *localkubeCacher) getLocalkubeSha256CacheFilepath() string { return l.getLocalkubeCacheFilepath() + ".sha256" } -func localkubeURIWasSpecified(config bootstrapper.KubernetesConfig) bool { +func localkubeURIWasSpecified(config config.KubernetesConfig) bool { // see if flag is different than default -> it was passed by user return config.KubernetesVersion != constants.DefaultKubernetesVersion } diff --git a/pkg/minikube/bootstrapper/localkube/localkube_test.go b/pkg/minikube/bootstrapper/localkube/localkube_test.go index 6cd2ca14a16f..a9f36dea99e3 100644 --- a/pkg/minikube/bootstrapper/localkube/localkube_test.go +++ b/pkg/minikube/bootstrapper/localkube/localkube_test.go @@ -17,15 +17,16 @@ limitations under the License. 
package localkube import ( + "bytes" "testing" - "k8s.io/minikube/pkg/minikube/bootstrapper" "k8s.io/minikube/pkg/minikube/bootstrapper/runner" + "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/constants" ) func TestStartCluster(t *testing.T) { - expectedStartCmd, err := GetStartCommand(bootstrapper.KubernetesConfig{}) + expectedStartCmd, err := GetStartCommand(config.KubernetesConfig{}) if err != nil { t.Fatalf("generating start command: %s", err) } @@ -50,7 +51,7 @@ func TestStartCluster(t *testing.T) { f := runner.NewFakeCommandRunner() f.SetCommandToOutput(map[string]string{test.startCmd: "ok"}) l := LocalkubeBootstrapper{f} - err := l.StartCluster(bootstrapper.KubernetesConfig{}) + err := l.StartCluster(config.KubernetesConfig{}) if err != nil && test.startCmd == expectedStartCmd { t.Errorf("Error starting cluster: %s", err) } @@ -59,7 +60,7 @@ func TestStartCluster(t *testing.T) { } func TestUpdateCluster(t *testing.T) { - defaultCfg := bootstrapper.KubernetesConfig{ + defaultCfg := config.KubernetesConfig{ KubernetesVersion: constants.DefaultKubernetesVersion, } defaultAddons := []string{ @@ -73,7 +74,7 @@ func TestUpdateCluster(t *testing.T) { } cases := []struct { description string - k8s bootstrapper.KubernetesConfig + k8s config.KubernetesConfig expectedFiles []string shouldErr bool }{ @@ -89,7 +90,7 @@ func TestUpdateCluster(t *testing.T) { }, { description: "no localkube version", - k8s: bootstrapper.KubernetesConfig{}, + k8s: config.KubernetesConfig{}, shouldErr: true, }, } @@ -201,13 +202,14 @@ func TestGetHostLogs(t *testing.T) { }, } + var b bytes.Buffer for _, test := range cases { t.Run(test.description, func(t *testing.T) { t.Parallel() f := runner.NewFakeCommandRunner() f.SetCommandToOutput(test.logsCmdMap) l := LocalkubeBootstrapper{f} - _, err := l.GetClusterLogs(test.follow) + err := l.GetClusterLogsTo(test.follow, &b) if err != nil && !test.shouldErr { t.Errorf("Error getting localkube logs: %s", err) return diff --git a/pkg/minikube/bootstrapper/runner/command_runner.go b/pkg/minikube/bootstrapper/runner/command_runner.go index 466c086f8925..8290f0471fef 100644 --- a/pkg/minikube/bootstrapper/runner/command_runner.go +++ b/pkg/minikube/bootstrapper/runner/command_runner.go @@ -18,6 +18,7 @@ package runner import ( "fmt" + "io" "path/filepath" "k8s.io/minikube/pkg/minikube/assets" @@ -28,6 +29,19 @@ type CommandRunner interface { // Run starts the specified command and waits for it to complete. Run(cmd string) error + // CombinedOutputTo runs the command and stores both command + // output and error to out. A typical usage is: + // + // var b bytes.Buffer + // CombinedOutput(cmd, &b) + // fmt.Println(b.Bytes()) + // + // Or, you can set out to os.Stdout, the command output and + // error would show on your terminal immediately before you + // cmd exit. This is useful for a long run command such as + // continuously print running logs. + CombinedOutputTo(cmd string, out io.Writer) error + // CombinedOutput runs the command and returns its combined standard // output and standard error. 
CombinedOutput(cmd string) (string, error) @@ -35,7 +49,7 @@ type CommandRunner interface { // Copy is a convenience method that runs a command to copy a file Copy(assets.CopyableFile) error - //Remove is a convenience method that runs a command to remove a file + // Remove is a convenience method that runs a command to remove a file Remove(assets.CopyableFile) error } diff --git a/pkg/minikube/bootstrapper/runner/exec_runner.go b/pkg/minikube/bootstrapper/runner/exec_runner.go index 50f118a363a3..59ef6c9bf88f 100644 --- a/pkg/minikube/bootstrapper/runner/exec_runner.go +++ b/pkg/minikube/bootstrapper/runner/exec_runner.go @@ -17,6 +17,7 @@ limitations under the License. package runner import ( + "bytes" "io" "os" "os/exec" @@ -43,16 +44,31 @@ func (*ExecRunner) Run(cmd string) error { return nil } -// CombinedOutput runs the command in a bash shell and returns its -// combined standard output and standard error. -func (*ExecRunner) CombinedOutput(cmd string) (string, error) { +// CombinedOutputTo runs the command and stores both command +// output and error to out. +func (*ExecRunner) CombinedOutputTo(cmd string, out io.Writer) error { glog.Infoln("Run with output:", cmd) c := exec.Command("/bin/bash", "-c", cmd) - out, err := c.CombinedOutput() + c.Stdout = out + c.Stderr = out + err := c.Run() + if err != nil { + return errors.Wrapf(err, "running command: %s\n.", cmd) + } + + return nil +} + +// CombinedOutput runs the command in a bash shell and returns its +// combined standard output and standard error. +func (e *ExecRunner) CombinedOutput(cmd string) (string, error) { + var b bytes.Buffer + err := e.CombinedOutputTo(cmd, &b) if err != nil { - return "", errors.Wrapf(err, "running command: %s\n output: %s", cmd, out) + return "", errors.Wrapf(err, "running command: %s\n output: %s", cmd, b.Bytes()) } - return string(out), nil + return b.String(), nil + } // Copy copies a file and its permissions @@ -89,6 +105,6 @@ do you have the correct permissions?`, // Remove removes a file func (e *ExecRunner) Remove(f assets.CopyableFile) error { - cmd := getDeleteFileCommand(f) - return e.Run(cmd) + targetPath := filepath.Join(f.GetTargetDir(), f.GetTargetName()) + return os.Remove(targetPath) } diff --git a/pkg/minikube/bootstrapper/runner/fake_runner.go b/pkg/minikube/bootstrapper/runner/fake_runner.go index ae379ec6b611..ef7b88b6d0f4 100644 --- a/pkg/minikube/bootstrapper/runner/fake_runner.go +++ b/pkg/minikube/bootstrapper/runner/fake_runner.go @@ -49,6 +49,21 @@ func (f *FakeCommandRunner) Run(cmd string) error { return err } +// CombinedOutputTo runs the command and stores both command +// output and error to out. +func (f *FakeCommandRunner) CombinedOutputTo(cmd string, out io.Writer) error { + value, ok := f.cmdMap.Load(cmd) + if !ok { + return fmt.Errorf("unavailable command: %s", cmd) + } + _, err := fmt.Fprint(out, value) + if err != nil { + return err + } + + return nil +} + // CombinedOutput returns the set output for a given command text. func (f *FakeCommandRunner) CombinedOutput(cmd string) (string, error) { out, ok := f.cmdMap.Load(cmd) diff --git a/pkg/minikube/bootstrapper/runner/ssh_runner.go b/pkg/minikube/bootstrapper/runner/ssh_runner.go index 2ef6238b5243..a0f3b605f7d9 100644 --- a/pkg/minikube/bootstrapper/runner/ssh_runner.go +++ b/pkg/minikube/bootstrapper/runner/ssh_runner.go @@ -17,6 +17,7 @@ limitations under the License. 
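
A usage sketch for the two output modes of the new runner API. Note the method name in the buffered example of the interface comment should read CombinedOutputTo, the variant that takes the writer. ExecRunner's zero value appears usable here, though that is inferred from this diff rather than documented:

```go
package main

import (
	"bytes"
	"fmt"
	"os"

	"k8s.io/minikube/pkg/minikube/bootstrapper/runner"
)

func main() {
	r := &runner.ExecRunner{}

	// Buffered: collect output, then inspect it afterwards.
	var b bytes.Buffer
	if err := r.CombinedOutputTo("echo buffered", &b); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
	fmt.Print(b.String())

	// Streaming: long-running commands surface output immediately.
	_ = r.CombinedOutputTo("sleep 1 && echo streamed", os.Stdout)
}
```
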
package runner import ( + "bytes" "fmt" "io" "path" @@ -63,20 +64,36 @@ func (s *SSHRunner) Run(cmd string) error { return sess.Run(cmd) } -// CombinedOutput runs the command on the remote and returns its combined -// standard output and standard error. -func (s *SSHRunner) CombinedOutput(cmd string) (string, error) { +// CombinedOutputTo runs the command and stores both command +// output and error to out. +func (s *SSHRunner) CombinedOutputTo(cmd string, out io.Writer) error { glog.Infoln("Run with output:", cmd) sess, err := s.c.NewSession() if err != nil { - return "", errors.Wrap(err, "getting ssh session") + return errors.Wrap(err, "getting ssh session") } defer sess.Close() - out, err := sess.CombinedOutput(cmd) + + sess.Stdout = out + sess.Stderr = out + + err = sess.Run(cmd) + if err != nil { + return errors.Wrapf(err, "running command: %s\n.", cmd) + } + + return nil +} + +// CombinedOutput runs the command on the remote and returns its combined +// standard output and standard error. +func (s *SSHRunner) CombinedOutput(cmd string) (string, error) { + var b bytes.Buffer + err := s.CombinedOutputTo(cmd, &b) if err != nil { - return "", errors.Wrapf(err, "running command: %s\n output: %s", cmd, out) + return "", errors.Wrapf(err, "running command: %s\n output: %s", cmd, b.Bytes()) } - return string(out), nil + return b.String(), nil } // Copy copies a file to the remote over SSH. diff --git a/pkg/minikube/cluster/cluster.go b/pkg/minikube/cluster/cluster.go index 406cb2c7fc10..56141356ecdd 100644 --- a/pkg/minikube/cluster/cluster.go +++ b/pkg/minikube/cluster/cluster.go @@ -28,9 +28,7 @@ import ( "regexp" "time" - "github.com/docker/machine/drivers/virtualbox" "github.com/docker/machine/libmachine" - "github.com/docker/machine/libmachine/drivers" "github.com/docker/machine/libmachine/engine" "github.com/docker/machine/libmachine/host" "github.com/docker/machine/libmachine/mcnerror" @@ -42,9 +40,9 @@ import ( cfg "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/constants" - pkgutil "k8s.io/minikube/pkg/util" - + "k8s.io/minikube/pkg/minikube/registry" "k8s.io/minikube/pkg/util" + pkgutil "k8s.io/minikube/pkg/util" ) const ( @@ -56,13 +54,13 @@ const ( //see: https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/util/logs/logs.go#L32-L34 func init() { flag.Set("logtostderr", "false") + // Setting the default client to native gives much better performance. ssh.SetDefaultClient(ssh.Native) } // StartHost starts a host VM. 
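
The SSHRunner change is the same inversion as the exec one: point the session's Stdout and Stderr at the caller's writer and Run, rather than buffering via sess.CombinedOutput. The shape against x/crypto/ssh, with client construction omitted as an assumption:

```go
package main

import (
	"io"

	"golang.org/x/crypto/ssh"
)

// streamOverSSH mirrors SSHRunner.CombinedOutputTo: one session per command,
// both output streams fan into the same writer, and Run blocks until exit.
func streamOverSSH(client *ssh.Client, cmd string, out io.Writer) error {
	sess, err := client.NewSession()
	if err != nil {
		return err
	}
	defer sess.Close()
	sess.Stdout = out
	sess.Stderr = out
	return sess.Run(cmd)
}

func main() {} // obtaining an *ssh.Client is outside this sketch
```
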
-func StartHost(api libmachine.API, config MachineConfig) (*host.Host, error) { - +func StartHost(api libmachine.API, config cfg.MachineConfig) (*host.Host, error) { exists, err := api.Exists(config.MachineName) if err != nil { return nil, errors.Wrapf(err, "Error checking if host exists: %s", config.MachineName) @@ -123,7 +121,7 @@ func StopHost(name string, api libmachine.API) error { func DeleteHost(name string, api libmachine.API) error { host, err := api.Load(name) if err != nil { - return errors.Wrapf(err, "Error deleting host: %s", cfg.GetMachineName()) + return errors.Wrapf(err, "Error deleting host: %s", name) } m := util.MultiError{} m.Collect(host.Driver.Remove()) @@ -175,7 +173,7 @@ func GetHostDriverIpByName(name string, api libmachine.API) (net.IP, error) { return ip, nil } -func engineOptions(config MachineConfig) *engine.Options { +func engineOptions(config cfg.MachineConfig) *engine.Options { o := engine.Options{ Env: config.DockerEnv, InsecureRegistry: append([]string{pkgutil.DefaultServiceCIDR}, config.InsecureRegistry...), @@ -185,33 +183,8 @@ func engineOptions(config MachineConfig) *engine.Options { return &o } -func createVirtualboxHost(config MachineConfig) drivers.Driver { - d := virtualbox.NewDriver(config.MachineName, constants.GetMinipath()) - d.Boot2DockerURL = config.Downloader.GetISOFileURI(config.MinikubeISO) - d.Memory = config.Memory - d.CPU = config.CPUs - d.DiskSize = int(config.DiskSize) - d.HostOnlyCIDR = config.HostOnlyCIDR - d.NoShare = config.DisableDriverMounts - d.NatNicType = defaultVirtualboxNicType - d.HostOnlyNicType = defaultVirtualboxNicType - return d -} - -func createHost(api libmachine.API, config MachineConfig) (*host.Host, error) { - var driver interface{} - - if config.VMDriver != "none" { - if err := config.Downloader.CacheMinikubeISOFromURL(config.MinikubeISO); err != nil { - return nil, errors.Wrap(err, "Error attempting to cache minikube ISO from URL") - } - } - +func preCreateHost(config *cfg.MachineConfig) error { switch config.VMDriver { - case "virtualbox": - driver = createVirtualboxHost(config) - case "vmwarefusion": - driver = createVMwareFusionHost(config) case "kvm": if viper.GetBool(cfg.ShowDriverDeprecationNotification) { fmt.Fprintln(os.Stderr, `WARNING: The kvm driver is now deprecated and support for it will be removed in a future release. @@ -219,9 +192,6 @@ func createHost(api libmachine.API, config MachineConfig) (*host.Host, error) { See https://github.com/kubernetes/minikube/blob/master/docs/drivers.md#kvm2-driver for more information. To disable this message, run [minikube config set WantShowDriverDeprecationNotification false]`) } - driver = createKVMHost(config) - case "kvm2": - driver = createKVM2Host(config) case "xhyve": if viper.GetBool(cfg.ShowDriverDeprecationNotification) { fmt.Fprintln(os.Stderr, `WARNING: The xhyve driver is now deprecated and support for it will be removed in a future release. @@ -229,17 +199,34 @@ Please consider switching to the hyperkit driver, which is intended to replace t See https://github.com/kubernetes/minikube/blob/master/docs/drivers.md#hyperkit-driver for more information. 
To disable this message, run [minikube config set WantShowDriverDeprecationNotification false]`) } - driver = createXhyveHost(config) - case "hyperv": - driver = createHypervHost(config) - case "none": - driver = createNoneHost(config) - case "hyperkit": - driver = createHyperkitHost(config) - default: - glog.Exitf("Unsupported driver: %s\n", config.VMDriver) } + return nil +} + +func createHost(api libmachine.API, config cfg.MachineConfig) (*host.Host, error) { + err := preCreateHost(&config) + if err != nil { + return nil, err + } + + def, err := registry.Driver(config.VMDriver) + if err != nil { + if err == registry.ErrDriverNotFound { + glog.Exitf("Unsupported driver: %s\n", config.VMDriver) + } else { + glog.Exit(err.Error()) + } + } + + if config.VMDriver != "none" { + if err := config.Downloader.CacheMinikubeISOFromURL(config.MinikubeISO); err != nil { + return nil, errors.Wrap(err, "Error attempting to cache minikube ISO from URL") + } + } + + driver := def.ConfigCreator(config) + data, err := json.Marshal(driver) if err != nil { return nil, errors.Wrap(err, "Error marshalling json") @@ -329,7 +316,7 @@ func GetVMHostIP(host *host.Host) (net.IP, error) { } return ip, nil case "virtualbox": - out, err := exec.Command(detectVBoxManageCmd(), "showvminfo", "minikube", "--machinereadable").Output() + out, err := exec.Command(detectVBoxManageCmd(), "showvminfo", host.Name, "--machinereadable").Output() if err != nil { return []byte{}, errors.Wrap(err, "Error running vboxmanage command") } diff --git a/pkg/minikube/cluster/cluster_darwin.go b/pkg/minikube/cluster/cluster_darwin.go index 336661731047..ba4ff3896e4c 100644 --- a/pkg/minikube/cluster/cluster_darwin.go +++ b/pkg/minikube/cluster/cluster_darwin.go @@ -18,84 +18,8 @@ package cluster import ( "os/exec" - - "github.com/docker/machine/drivers/vmwarefusion" - "github.com/docker/machine/libmachine/drivers" - "github.com/pborman/uuid" - "k8s.io/minikube/pkg/drivers/hyperkit" - cfg "k8s.io/minikube/pkg/minikube/config" - "k8s.io/minikube/pkg/minikube/constants" ) -func createVMwareFusionHost(config MachineConfig) drivers.Driver { - d := vmwarefusion.NewDriver(cfg.GetMachineName(), constants.GetMinipath()).(*vmwarefusion.Driver) - d.Boot2DockerURL = config.Downloader.GetISOFileURI(config.MinikubeISO) - d.Memory = config.Memory - d.CPU = config.CPUs - d.DiskSize = config.DiskSize - - // TODO(philips): push these defaults upstream to fixup this driver - d.SSHPort = 22 - d.ISO = d.ResolveStorePath("boot2docker.iso") - return d -} - -type xhyveDriver struct { - *drivers.BaseDriver - Boot2DockerURL string - BootCmd string - CPU int - CaCertPath string - DiskSize int64 - MacAddr string - Memory int - PrivateKeyPath string - UUID string - NFSShare bool - DiskNumber int - Virtio9p bool - Virtio9pFolder string - QCow2 bool - RawDisk bool -} - -func createHyperkitHost(config MachineConfig) *hyperkit.Driver { - return &hyperkit.Driver{ - BaseDriver: &drivers.BaseDriver{ - MachineName: cfg.GetMachineName(), - StorePath: constants.GetMinipath(), - SSHUser: "docker", - }, - Boot2DockerURL: config.Downloader.GetISOFileURI(config.MinikubeISO), - DiskSize: config.DiskSize, - Memory: config.Memory, - CPU: config.CPUs, - NFSShares: config.NFSShare, - NFSSharesRoot: config.NFSSharesRoot, - UUID: uuid.NewUUID().String(), - Cmdline: "loglevel=3 user=docker console=ttyS0 console=tty0 noembed nomodeset norestore waitusb=10 systemd.legacy_systemd_cgroup_controller=yes base host=" + cfg.GetMachineName(), - } -} - -func createXhyveHost(config MachineConfig) 
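
createHost's per-driver switch is replaced by a registry lookup. Only Driver, ErrDriverNotFound, ListDrivers, and the definition's Name and ConfigCreator are visible in this diff, so the sketch below sticks to those; the return type of ConfigCreator is assumed to be an opaque value that is later JSON-marshalled:

```go
package cluster

import (
	"github.com/golang/glog"

	cfg "k8s.io/minikube/pkg/minikube/config"
	"k8s.io/minikube/pkg/minikube/registry"
)

// lookupDriver resolves a VM driver definition by name, mirroring the error
// handling createHost now performs instead of switching on VMDriver.
func lookupDriver(config cfg.MachineConfig) interface{} {
	def, err := registry.Driver(config.VMDriver)
	if err != nil {
		if err == registry.ErrDriverNotFound {
			glog.Exitf("Unsupported driver: %s\n", config.VMDriver)
		}
		glog.Exit(err.Error())
	}
	return def.ConfigCreator(config)
}
```
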
*xhyveDriver { - useVirtio9p := !config.DisableDriverMounts - return &xhyveDriver{ - BaseDriver: &drivers.BaseDriver{ - MachineName: cfg.GetMachineName(), - StorePath: constants.GetMinipath(), - }, - Memory: config.Memory, - CPU: config.CPUs, - Boot2DockerURL: config.Downloader.GetISOFileURI(config.MinikubeISO), - BootCmd: "loglevel=3 user=docker console=ttyS0 console=tty0 noembed nomodeset norestore waitusb=10 systemd.legacy_systemd_cgroup_controller=yes base host=" + cfg.GetMachineName(), - DiskSize: int64(config.DiskSize), - Virtio9p: useVirtio9p, - Virtio9pFolder: "/Users", - QCow2: false, - RawDisk: config.XhyveDiskDriver == "virtio-blk", - } -} - func detectVBoxManageCmd() string { cmd := "VBoxManage" if path, err := exec.LookPath(cmd); err == nil { diff --git a/pkg/minikube/cluster/cluster_linux.go b/pkg/minikube/cluster/cluster_linux.go index fb21521cefde..ba4ff3896e4c 100644 --- a/pkg/minikube/cluster/cluster_linux.go +++ b/pkg/minikube/cluster/cluster_linux.go @@ -17,71 +17,9 @@ limitations under the License. package cluster import ( - "fmt" "os/exec" - "path/filepath" - - "github.com/docker/machine/libmachine/drivers" - "k8s.io/minikube/pkg/drivers/none" - cfg "k8s.io/minikube/pkg/minikube/config" - "k8s.io/minikube/pkg/minikube/constants" ) -type kvmDriver struct { - *drivers.BaseDriver - - Memory int - DiskSize int - CPU int - Network string - PrivateNetwork string - ISO string - Boot2DockerURL string - DiskPath string - CacheMode string - IOMode string -} - -func createKVMHost(config MachineConfig) *kvmDriver { - return &kvmDriver{ - BaseDriver: &drivers.BaseDriver{ - MachineName: cfg.GetMachineName(), - StorePath: constants.GetMinipath(), - SSHUser: "docker", - }, - Memory: config.Memory, - CPU: config.CPUs, - Network: config.KvmNetwork, - PrivateNetwork: "docker-machines", - Boot2DockerURL: config.Downloader.GetISOFileURI(config.MinikubeISO), - DiskSize: config.DiskSize, - DiskPath: filepath.Join(constants.GetMinipath(), "machines", cfg.GetMachineName(), fmt.Sprintf("%s.rawdisk", cfg.GetMachineName())), - ISO: filepath.Join(constants.GetMinipath(), "machines", cfg.GetMachineName(), "boot2docker.iso"), - CacheMode: "default", - IOMode: "threads", - } -} - -func createKVM2Host(config MachineConfig) *kvmDriver { - return &kvmDriver{ - BaseDriver: &drivers.BaseDriver{ - MachineName: cfg.GetMachineName(), - StorePath: constants.GetMinipath(), - SSHUser: "docker", - }, - Memory: config.Memory, - CPU: config.CPUs, - Network: config.KvmNetwork, - PrivateNetwork: "minikube-net", - Boot2DockerURL: config.Downloader.GetISOFileURI(config.MinikubeISO), - DiskSize: config.DiskSize, - DiskPath: filepath.Join(constants.GetMinipath(), "machines", cfg.GetMachineName(), fmt.Sprintf("%s.rawdisk", cfg.GetMachineName())), - ISO: filepath.Join(constants.GetMinipath(), "machines", cfg.GetMachineName(), "boot2docker.iso"), - CacheMode: "default", - IOMode: "threads", - } -} - func detectVBoxManageCmd() string { cmd := "VBoxManage" if path, err := exec.LookPath(cmd); err == nil { @@ -89,12 +27,3 @@ func detectVBoxManageCmd() string { } return cmd } - -func createNoneHost(config MachineConfig) *none.Driver { - return &none.Driver{ - BaseDriver: &drivers.BaseDriver{ - MachineName: cfg.GetMachineName(), - StorePath: constants.GetMinipath(), - }, - } -} diff --git a/pkg/minikube/cluster/cluster_non_darwin_panic.go b/pkg/minikube/cluster/cluster_non_darwin_panic.go deleted file mode 100644 index 82b510adfaaa..000000000000 --- a/pkg/minikube/cluster/cluster_non_darwin_panic.go +++ /dev/null @@ -1,33 +0,0 
@@ -// +build !darwin - -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cluster - -import "github.com/docker/machine/libmachine/drivers" - -func createVMwareFusionHost(config MachineConfig) drivers.Driver { - panic("vmwarefusion not supported") -} - -func createXhyveHost(config MachineConfig) drivers.Driver { - panic("xhyve not supported") -} - -func createHyperkitHost(config MachineConfig) drivers.Driver { - panic("hyperkit not supported") -} diff --git a/pkg/minikube/cluster/cluster_non_linux_panic.go b/pkg/minikube/cluster/cluster_non_linux_panic.go deleted file mode 100644 index f3ea6ed21186..000000000000 --- a/pkg/minikube/cluster/cluster_non_linux_panic.go +++ /dev/null @@ -1,33 +0,0 @@ -// +build !linux - -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cluster - -import "github.com/docker/machine/libmachine/drivers" - -func createKVMHost(config MachineConfig) drivers.Driver { - panic("kvm not supported") -} - -func createKVM2Host(config MachineConfig) drivers.Driver { - panic("kvm2 not supported") -} - -func createNoneHost(config MachineConfig) drivers.Driver { - panic("no-vm not supported") -} diff --git a/pkg/minikube/cluster/cluster_non_windows_panic.go b/pkg/minikube/cluster/cluster_non_windows_panic.go deleted file mode 100644 index e3afdcb23289..000000000000 --- a/pkg/minikube/cluster/cluster_non_windows_panic.go +++ /dev/null @@ -1,25 +0,0 @@ -// +build !windows - -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package cluster - -import "github.com/docker/machine/libmachine/drivers" - -func createHypervHost(config MachineConfig) drivers.Driver { - panic("hyperv not supported") -} diff --git a/pkg/minikube/cluster/cluster_test.go b/pkg/minikube/cluster/cluster_test.go index 89fe3a5bd23c..f5c3e2ce1b27 100644 --- a/pkg/minikube/cluster/cluster_test.go +++ b/pkg/minikube/cluster/cluster_test.go @@ -27,6 +27,7 @@ import ( "github.com/docker/machine/libmachine/state" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/constants" + "k8s.io/minikube/pkg/minikube/registry" "k8s.io/minikube/pkg/minikube/tests" ) @@ -35,7 +36,7 @@ type MockDownloader struct{} func (d MockDownloader) GetISOFileURI(isoURL string) string { return "" } func (d MockDownloader) CacheMinikubeISOFromURL(isoURL string) error { return nil } -var defaultMachineConfig = MachineConfig{ +var defaultMachineConfig = config.MachineConfig{ MachineName: "test-cluster", VMDriver: constants.DefaultVMDriver, MinikubeISO: constants.DefaultIsoUrl, @@ -68,15 +69,15 @@ func TestCreateHost(t *testing.T) { } found := false - for _, driver := range constants.SupportedVMDrivers { - if h.DriverName == driver { + for _, def := range registry.ListDrivers() { + if h.DriverName == def.Name { found = true break } } if !found { - t.Fatalf("Wrong driver name: %v. Should be virtualbox, vmwarefusion, kvm or xhyve.", h.DriverName) + t.Fatalf("Wrong driver name: %v. It should be among drivers %v", h.DriverName, registry.ListDrivers()) } } @@ -180,7 +181,7 @@ func TestStartHostConfig(t *testing.T) { md := &tests.MockDetector{Provisioner: &tests.MockProvisioner{}} provision.SetDetector(md) - config := MachineConfig{ + config := config.MachineConfig{ VMDriver: constants.DefaultVMDriver, DockerEnv: []string{"FOO=BAR"}, DockerOpt: []string{"param=value"}, diff --git a/pkg/minikube/cluster/cluster_windows.go b/pkg/minikube/cluster/cluster_windows.go index a6aa37343a6f..b0d98dc831f5 100644 --- a/pkg/minikube/cluster/cluster_windows.go +++ b/pkg/minikube/cluster/cluster_windows.go @@ -22,26 +22,11 @@ import ( "os/exec" "path/filepath" - "github.com/docker/machine/drivers/hyperv" - "github.com/docker/machine/libmachine/drivers" "github.com/golang/glog" "github.com/pkg/errors" "golang.org/x/sys/windows/registry" - cfg "k8s.io/minikube/pkg/minikube/config" - "k8s.io/minikube/pkg/minikube/constants" ) -func createHypervHost(config MachineConfig) drivers.Driver { - d := hyperv.NewDriver(cfg.GetMachineName(), constants.GetMinipath()) - d.Boot2DockerURL = config.Downloader.GetISOFileURI(config.MinikubeISO) - d.VSwitch = config.HypervVirtualSwitch - d.MemSize = config.Memory - d.CPU = config.CPUs - d.DiskSize = int(config.DiskSize) - d.SSHUser = "docker" - return d -} - func detectVBoxManageCmd() string { cmd := "VBoxManage" if p := os.Getenv("VBOX_INSTALL_PATH"); p != "" { diff --git a/pkg/minikube/cluster/default_drivers.go b/pkg/minikube/cluster/default_drivers.go new file mode 100644 index 000000000000..e1030f2f93d9 --- /dev/null +++ b/pkg/minikube/cluster/default_drivers.go @@ -0,0 +1,28 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cluster + +import ( + _ "k8s.io/minikube/pkg/minikube/drivers/hyperkit" + _ "k8s.io/minikube/pkg/minikube/drivers/hyperv" + _ "k8s.io/minikube/pkg/minikube/drivers/kvm" + _ "k8s.io/minikube/pkg/minikube/drivers/kvm2" + _ "k8s.io/minikube/pkg/minikube/drivers/none" + _ "k8s.io/minikube/pkg/minikube/drivers/virtualbox" + _ "k8s.io/minikube/pkg/minikube/drivers/vmwarefusion" + _ "k8s.io/minikube/pkg/minikube/drivers/xhyve" +) diff --git a/pkg/minikube/cluster/types.go b/pkg/minikube/cluster/types.go deleted file mode 100644 index f17ba9aad2d2..000000000000 --- a/pkg/minikube/cluster/types.go +++ /dev/null @@ -1,54 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cluster - -import ( - "k8s.io/minikube/pkg/minikube" - "k8s.io/minikube/pkg/minikube/bootstrapper" - "k8s.io/minikube/pkg/util" -) - -// MachineConfig contains the parameters used to start a cluster. -type MachineConfig struct { - MachineName string - MinikubeISO string - Memory int - CPUs int - DiskSize int - VMDriver string - XhyveDiskDriver string // Only used by the xhyve driver - DockerEnv []string // Each entry is formatted as KEY=VALUE. - InsecureRegistry []string - RegistryMirror []string - HostOnlyCIDR string // Only used by the virtualbox driver - HypervVirtualSwitch string - KvmNetwork string // Only used by the KVM driver - Downloader util.ISODownloader `json:"-"` - DockerOpt []string // Each entry is formatted as KEY=VALUE. 
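
The blank imports in default_drivers.go imply each driver package registers itself in an init function. The registration entry point is not shown anywhere in this diff, so everything below (Register, DriverDef, the field set, the stub ConfigCreator) is an assumption sketched backwards from the lookup side:

```go
package virtualbox

import (
	cfg "k8s.io/minikube/pkg/minikube/config"
	"k8s.io/minikube/pkg/minikube/registry" // Register and DriverDef are assumed names
)

// createVirtualboxHost would build the docker-machine driver config from the
// MachineConfig; elided here as a stub.
func createVirtualboxHost(config cfg.MachineConfig) interface{} { return nil }

func init() {
	// Hypothetical registration: the effect is that registry.Driver("virtualbox")
	// and registry.ListDrivers() can find the driver without cluster.createHost
	// enumerating it in a switch.
	registry.Register(registry.DriverDef{
		Name:          "virtualbox",
		ConfigCreator: createVirtualboxHost,
	})
}
```
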
- DisableDriverMounts bool // Only used by virtualbox and xhyve - NFSShare []string - NFSSharesRoot string - UUID string // Only used by hyperkit to restore the mac address -} - -// Config contains machine and k8s config -type Config struct { - ClusterName string - MachineConfig MachineConfig - KubernetesConfig bootstrapper.KubernetesConfig - Nodes []minikube.NodeConfig -} diff --git a/pkg/minikube/config/config.go b/pkg/minikube/config/config.go index 4919c7b58019..8a53d54b2b28 100644 --- a/pkg/minikube/config/config.go +++ b/pkg/minikube/config/config.go @@ -28,14 +28,15 @@ import ( ) const ( - WantUpdateNotification = "WantUpdateNotification" - ReminderWaitPeriodInHours = "ReminderWaitPeriodInHours" - WantReportError = "WantReportError" - WantReportErrorPrompt = "WantReportErrorPrompt" - WantKubectlDownloadMsg = "WantKubectlDownloadMsg" - WantNoneDriverWarning = "WantNoneDriverWarning" - MachineProfile = "profile" - ShowDriverDeprecationNotification = "ShowDriverDeprecationNotification" + WantUpdateNotification = "WantUpdateNotification" + ReminderWaitPeriodInHours = "ReminderWaitPeriodInHours" + WantReportError = "WantReportError" + WantReportErrorPrompt = "WantReportErrorPrompt" + WantKubectlDownloadMsg = "WantKubectlDownloadMsg" + WantNoneDriverWarning = "WantNoneDriverWarning" + MachineProfile = "profile" + ShowDriverDeprecationNotification = "ShowDriverDeprecationNotification" + ShowBootstrapperDeprecationNotification = "ShowBootstrapperDeprecationNotification" ) type MinikubeConfig map[string]interface{} @@ -64,6 +65,8 @@ func ReadConfig() (MinikubeConfig, error) { } return nil, fmt.Errorf("Could not open file %s: %s", constants.ConfigFile, err) } + defer f.Close() + m, err := decode(f) if err != nil { return nil, fmt.Errorf("Could not decode config %s: %s", constants.ConfigFile, err) diff --git a/pkg/minikube/config/types.go b/pkg/minikube/config/types.go new file mode 100644 index 000000000000..82cdd4d392ae --- /dev/null +++ b/pkg/minikube/config/types.go @@ -0,0 +1,78 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "net" + + "k8s.io/minikube/pkg/util" +) + +// Config contains machine and k8s config +type Config struct { + ClusterName string + MachineConfig MachineConfig + KubernetesConfig KubernetesConfig + Nodes []NodeConfig +} + +// MachineConfig contains the parameters used to start a cluster. +type MachineConfig struct { + MachineName string + MinikubeISO string + Memory int + CPUs int + DiskSize int + VMDriver string + XhyveDiskDriver string // Only used by the xhyve driver + DockerEnv []string // Each entry is formatted as KEY=VALUE. + InsecureRegistry []string + RegistryMirror []string + HostOnlyCIDR string // Only used by the virtualbox driver + HypervVirtualSwitch string + KvmNetwork string // Only used by the KVM driver + Downloader util.ISODownloader `json:"-"` + DockerOpt []string // Each entry is formatted as KEY=VALUE. 
+ DisableDriverMounts bool // Only used by virtualbox and xhyve + NFSShare []string + NFSSharesRoot string + UUID string // Only used by hyperkit to restore the mac address +} + +// KubernetesConfig contains the parameters used to configure the VM Kubernetes. +type KubernetesConfig struct { + KubernetesVersion string + NodeIP string + NodeName string + APIServerName string + APIServerNames []string + APIServerIPs []net.IP + DNSDomain string + ContainerRuntime string + NetworkPlugin string + FeatureGates string + ServiceCIDR string + ExtraOptions util.ExtraOptionSlice + + ShouldLoadCachedImages bool + BootstrapToken string +} + +// NodeConfig contains the parameters used to start a node. +type NodeConfig struct { + Name string +} diff --git a/pkg/minikube/constants/constants.go b/pkg/minikube/constants/constants.go index 1f567e1c58e8..faafe1aa47d4 100644 --- a/pkg/minikube/constants/constants.go +++ b/pkg/minikube/constants/constants.go @@ -46,6 +46,16 @@ func GetMinipath() string { return filepath.Join(os.Getenv(MinikubeHome), ".minikube") } +// SupportedVMDrivers is a list of supported drivers on all platforms. Currently +// used in gendocs. +var SupportedVMDrivers = [...]string{ + "virtualbox", + "vmwarefusion", + "kvm", + "xhyve", + "hyperv", +} + var DefaultMinipath = filepath.Join(homedir.HomeDir(), ".minikube") // KubeconfigPath is the path to the Kubernetes client config @@ -101,7 +111,7 @@ const ( KubernetesVersionGCSURL = "https://storage.googleapis.com/minikube/k8s_releases.json" DefaultWait = 20 DefaultInterval = 6 - DefaultClusterBootstrapper = "localkube" + DefaultClusterBootstrapper = "kubeadm" ) var DefaultIsoUrl = fmt.Sprintf("https://storage.googleapis.com/%s/minikube-%s.iso", minikubeVersion.GetIsoPath(), minikubeVersion.GetIsoVersion()) @@ -145,6 +155,16 @@ const ( KubeadmConfigFile = "/var/lib/kubeadm.yaml" ) +var Preflights = []string{ + "DirAvailable--etc-kubernetes-manifests", + "DirAvailable--data", + "FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml", + "FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml", + "FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml", + "FileAvailable--etc-kubernetes-manifests-etcd.yaml", + "Swap", +} + const ( LocalkubeServicePath = "/etc/systemd/system/localkube.service" LocalkubeRunning = "active" @@ -181,7 +201,7 @@ var LocalkubeCachedImages = []string{ "k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.5", // Addon Manager - "gcr.io/google-containers/kube-addon-manager:v6.5", + "k8s.gcr.io/kube-addon-manager:v6.5", // Pause "k8s.gcr.io/pause-amd64:3.0", @@ -196,7 +216,7 @@ func GetKubeadmCachedImages(version string) []string { "k8s.gcr.io/kubernetes-dashboard-amd64:v1.8.1", // Addon Manager - "gcr.io/google-containers/kube-addon-manager:v6.5", + "k8s.gcr.io/kube-addon-manager:v6.5", // Pause "k8s.gcr.io/pause-amd64:3.0", diff --git a/pkg/minikube/constants/constants_darwin.go b/pkg/minikube/constants/constants_darwin.go index a6effc509fe2..081ffea3fecf 100644 --- a/pkg/minikube/constants/constants_darwin.go +++ b/pkg/minikube/constants/constants_darwin.go @@ -1,4 +1,4 @@ -// +build darwin,!gendocs +// +build darwin /* Copyright 2016 The Kubernetes Authors All rights reserved. @@ -18,11 +18,4 @@ limitations under the License. 
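The per-platform SupportedVMDrivers lists deleted below are superseded by the runtime driver registry; the single list kept in constants.go exists, per its comment, only for documentation generation. A minimal sketch of that kind of consumer, assuming nothing about the real gendocs code beyond the exported variable:

	package main

	import (
		"fmt"

		"k8s.io/minikube/pkg/minikube/constants"
	)

	func main() {
		// Illustrative only: enumerate the statically known driver names,
		// e.g. when generating --vm-driver flag documentation.
		for _, name := range constants.SupportedVMDrivers {
			fmt.Printf("* --vm-driver=%s\n", name)
		}
	}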
package constants -var SupportedVMDrivers = [...]string{ - "virtualbox", - "xhyve", - "vmwarefusion", - "hyperkit", -} - var DefaultMountDir = "/Users" diff --git a/pkg/minikube/constants/constants_gendocs.go b/pkg/minikube/constants/constants_gendocs.go deleted file mode 100644 index 6c34e66cc431..000000000000 --- a/pkg/minikube/constants/constants_gendocs.go +++ /dev/null @@ -1,27 +0,0 @@ -// +build gendocs - -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package constants - -var SupportedVMDrivers = [...]string{ - "virtualbox", - "vmwarefusion", - "kvm", - "xhyve", - "hyperv", -} diff --git a/pkg/minikube/constants/constants_linux.go b/pkg/minikube/constants/constants_linux.go index 30e020310510..1df7788d30ac 100644 --- a/pkg/minikube/constants/constants_linux.go +++ b/pkg/minikube/constants/constants_linux.go @@ -1,4 +1,4 @@ -// +build linux,!gendocs +// +build linux /* Copyright 2016 The Kubernetes Authors All rights reserved. @@ -22,11 +22,4 @@ import ( "k8s.io/client-go/util/homedir" ) -var SupportedVMDrivers = [...]string{ - "virtualbox", - "kvm", - "kvm2", - "none", -} - var DefaultMountDir = homedir.HomeDir() diff --git a/pkg/minikube/constants/constants_windows.go b/pkg/minikube/constants/constants_windows.go index 475f2f322c0d..aa66af22a660 100644 --- a/pkg/minikube/constants/constants_windows.go +++ b/pkg/minikube/constants/constants_windows.go @@ -1,4 +1,4 @@ -// +build windows,!gendocs +// +build windows /* Copyright 2016 The Kubernetes Authors All rights reserved. @@ -22,9 +22,4 @@ import ( "k8s.io/client-go/util/homedir" ) -var SupportedVMDrivers = [...]string{ - "virtualbox", - "hyperv", -} - var DefaultMountDir = homedir.HomeDir() diff --git a/pkg/minikube/drivers/hyperkit/doc.go b/pkg/minikube/drivers/hyperkit/doc.go new file mode 100644 index 000000000000..05355f071c74 --- /dev/null +++ b/pkg/minikube/drivers/hyperkit/doc.go @@ -0,0 +1,17 @@ +/* +Copyright 2018 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package hyperkit diff --git a/pkg/minikube/drivers/hyperkit/driver.go b/pkg/minikube/drivers/hyperkit/driver.go new file mode 100644 index 000000000000..e932296c9adf --- /dev/null +++ b/pkg/minikube/drivers/hyperkit/driver.go @@ -0,0 +1,54 @@ +// +build darwin + +/* +Copyright 2018 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package hyperkit + +import ( + "github.com/docker/machine/libmachine/drivers" + "github.com/pborman/uuid" + "k8s.io/minikube/pkg/drivers/hyperkit" + cfg "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/constants" + "k8s.io/minikube/pkg/minikube/registry" +) + +func init() { + registry.Register(registry.DriverDef{ + Name: "hyperkit", + Builtin: false, + ConfigCreator: createHyperkitHost, + }) +} + +func createHyperkitHost(config cfg.MachineConfig) interface{} { + return &hyperkit.Driver{ + BaseDriver: &drivers.BaseDriver{ + MachineName: cfg.GetMachineName(), + StorePath: constants.GetMinipath(), + SSHUser: "docker", + }, + Boot2DockerURL: config.Downloader.GetISOFileURI(config.MinikubeISO), + DiskSize: config.DiskSize, + Memory: config.Memory, + CPU: config.CPUs, + NFSShares: config.NFSShare, + NFSSharesRoot: config.NFSSharesRoot, + UUID: uuid.NewUUID().String(), + Cmdline: "loglevel=3 user=docker console=ttyS0 console=tty0 noembed nomodeset norestore waitusb=10 systemd.legacy_systemd_cgroup_controller=yes base host=" + cfg.GetMachineName(), + } +} diff --git a/pkg/minikube/drivers/hyperv/doc.go b/pkg/minikube/drivers/hyperv/doc.go new file mode 100644 index 000000000000..9d380b9e8e17 --- /dev/null +++ b/pkg/minikube/drivers/hyperv/doc.go @@ -0,0 +1,17 @@ +/* +Copyright 2018 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package hyperv diff --git a/pkg/minikube/drivers/hyperv/driver.go b/pkg/minikube/drivers/hyperv/driver.go new file mode 100644 index 000000000000..2323b6383024 --- /dev/null +++ b/pkg/minikube/drivers/hyperv/driver.go @@ -0,0 +1,51 @@ +// +build windows + +/* +Copyright 2018 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package hyperv + +import ( + "github.com/docker/machine/drivers/hyperv" + "github.com/docker/machine/libmachine/drivers" + cfg "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/constants" + "k8s.io/minikube/pkg/minikube/registry" +) + +func init() { + registry.Register(registry.DriverDef{ + Name: "hyperv", + Builtin: true, + ConfigCreator: createHypervHost, + DriverCreator: func() drivers.Driver { + return hyperv.NewDriver("", "") + }, + }) +} + +func createHypervHost(config cfg.MachineConfig) interface{} { + d := hyperv.NewDriver(cfg.GetMachineName(), constants.GetMinipath()) + + d.Boot2DockerURL = config.Downloader.GetISOFileURI(config.MinikubeISO) + d.VSwitch = config.HypervVirtualSwitch + d.MemSize = config.Memory + d.CPU = config.CPUs + d.DiskSize = int(config.DiskSize) + d.SSHUser = "docker" + + return d +} diff --git a/pkg/minikube/drivers/kvm/doc.go b/pkg/minikube/drivers/kvm/doc.go new file mode 100644 index 000000000000..d15b5fab9d71 --- /dev/null +++ b/pkg/minikube/drivers/kvm/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2018 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kvm + +// This package registers the KVM docker-machine driver with minikube's driver registry. diff --git a/pkg/minikube/drivers/kvm/driver.go b/pkg/minikube/drivers/kvm/driver.go new file mode 100644 index 000000000000..d053725375d5 --- /dev/null +++ b/pkg/minikube/drivers/kvm/driver.go @@ -0,0 +1,74 @@ +// +build linux + +/* +Copyright 2018 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package kvm + +import ( + "fmt" + "path/filepath" + + "github.com/docker/machine/libmachine/drivers" + cfg "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/constants" + "k8s.io/minikube/pkg/minikube/registry" +) + +func init() { + registry.Register(registry.DriverDef{ + Name: "kvm", + Builtin: false, + ConfigCreator: createKVMHost, + }) +} + +// Delete this once the following PR is merged: +// https://github.com/dhiltgen/docker-machine-kvm/pull/68 +type kvmDriver struct { + *drivers.BaseDriver + + Memory int + DiskSize int + CPU int + Network string + PrivateNetwork string + ISO string + Boot2DockerURL string + DiskPath string + CacheMode string + IOMode string +} + +func createKVMHost(config cfg.MachineConfig) interface{} { + return &kvmDriver{ + BaseDriver: &drivers.BaseDriver{ + MachineName: cfg.GetMachineName(), + StorePath: constants.GetMinipath(), + SSHUser: "docker", + }, + Memory: config.Memory, + CPU: config.CPUs, + Network: config.KvmNetwork, + PrivateNetwork: "docker-machines", + Boot2DockerURL: config.Downloader.GetISOFileURI(config.MinikubeISO), + DiskSize: config.DiskSize, + DiskPath: filepath.Join(constants.GetMinipath(), "machines", cfg.GetMachineName(), fmt.Sprintf("%s.rawdisk", cfg.GetMachineName())), + ISO: filepath.Join(constants.GetMinipath(), "machines", cfg.GetMachineName(), "boot2docker.iso"), + CacheMode: "default", + IOMode: "threads", + } +} diff --git a/pkg/minikube/drivers/kvm2/doc.go b/pkg/minikube/drivers/kvm2/doc.go new file mode 100644 index 000000000000..c757fe84543c --- /dev/null +++ b/pkg/minikube/drivers/kvm2/doc.go @@ -0,0 +1,17 @@ +/* +Copyright 2018 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kvm2 diff --git a/pkg/minikube/drivers/kvm2/driver.go b/pkg/minikube/drivers/kvm2/driver.go new file mode 100644 index 000000000000..3c101ce60b65 --- /dev/null +++ b/pkg/minikube/drivers/kvm2/driver.go @@ -0,0 +1,74 @@ +// +build linux + +/* +Copyright 2018 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kvm2 + +import ( + "fmt" + "path/filepath" + + "github.com/docker/machine/libmachine/drivers" + cfg "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/constants" + "k8s.io/minikube/pkg/minikube/registry" +) + +func init() { + registry.Register(registry.DriverDef{ + Name: "kvm2", + Builtin: false, + ConfigCreator: createKVM2Host, + }) +} + +// Delete this once the following PR is merged: +// https://github.com/dhiltgen/docker-machine-kvm/pull/68 +type kvmDriver struct { + *drivers.BaseDriver + + Memory int + DiskSize int + CPU int + Network string + PrivateNetwork string + ISO string + Boot2DockerURL string + DiskPath string + CacheMode string + IOMode string +} + +func createKVM2Host(config cfg.MachineConfig) interface{} { + return &kvmDriver{ + BaseDriver: &drivers.BaseDriver{ + MachineName: cfg.GetMachineName(), + StorePath: constants.GetMinipath(), + SSHUser: "docker", + }, + Memory: config.Memory, + CPU: config.CPUs, + Network: config.KvmNetwork, + PrivateNetwork: "minikube-net", + Boot2DockerURL: config.Downloader.GetISOFileURI(config.MinikubeISO), + DiskSize: config.DiskSize, + DiskPath: filepath.Join(constants.GetMinipath(), "machines", cfg.GetMachineName(), fmt.Sprintf("%s.rawdisk", cfg.GetMachineName())), + ISO: filepath.Join(constants.GetMinipath(), "machines", cfg.GetMachineName(), "boot2docker.iso"), + CacheMode: "default", + IOMode: "threads", + } +} diff --git a/pkg/minikube/drivers/none/doc.go b/pkg/minikube/drivers/none/doc.go new file mode 100644 index 000000000000..ac83767daf76 --- /dev/null +++ b/pkg/minikube/drivers/none/doc.go @@ -0,0 +1,17 @@ +/* +Copyright 2018 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package none diff --git a/pkg/minikube/drivers/none/driver.go b/pkg/minikube/drivers/none/driver.go new file mode 100644 index 000000000000..0bbe465243fd --- /dev/null +++ b/pkg/minikube/drivers/none/driver.go @@ -0,0 +1,47 @@ +// +build linux + +/* +Copyright 2018 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package none + +import ( + "github.com/docker/machine/libmachine/drivers" + "k8s.io/minikube/pkg/drivers/none" + cfg "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/constants" + "k8s.io/minikube/pkg/minikube/registry" +) + +func init() { + registry.Register(registry.DriverDef{ + Name: "none", + Builtin: true, + ConfigCreator: createNoneHost, + DriverCreator: func() drivers.Driver { + return none.NewDriver("", "") + }, + }) +} + +func createNoneHost(config cfg.MachineConfig) interface{} { + return &none.Driver{ + BaseDriver: &drivers.BaseDriver{ + MachineName: cfg.GetMachineName(), + StorePath: constants.GetMinipath(), + }, + } +} diff --git a/pkg/minikube/drivers/virtualbox/doc.go b/pkg/minikube/drivers/virtualbox/doc.go new file mode 100644 index 000000000000..0e6eff9e8ca1 --- /dev/null +++ b/pkg/minikube/drivers/virtualbox/doc.go @@ -0,0 +1,17 @@ +/* +Copyright 2018 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package virtualbox diff --git a/pkg/minikube/drivers/virtualbox/driver.go b/pkg/minikube/drivers/virtualbox/driver.go new file mode 100644 index 000000000000..29eb5b145f6f --- /dev/null +++ b/pkg/minikube/drivers/virtualbox/driver.go @@ -0,0 +1,53 @@ +/* +Copyright 2018 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package virtualbox + +import ( + "github.com/docker/machine/drivers/virtualbox" + "github.com/docker/machine/libmachine/drivers" + cfg "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/constants" + "k8s.io/minikube/pkg/minikube/registry" +) + +const defaultVirtualboxNicType = "virtio" + +func init() { + registry.Register(registry.DriverDef{ + Name: "virtualbox", + Builtin: true, + ConfigCreator: createVirtualboxHost, + DriverCreator: func() drivers.Driver { + return virtualbox.NewDriver("", "") + }, + }) +} + +func createVirtualboxHost(config cfg.MachineConfig) interface{} { + d := virtualbox.NewDriver(cfg.GetMachineName(), constants.GetMinipath()) + + d.Boot2DockerURL = config.Downloader.GetISOFileURI(config.MinikubeISO) + d.Memory = config.Memory + d.CPU = config.CPUs + d.DiskSize = int(config.DiskSize) + d.HostOnlyCIDR = config.HostOnlyCIDR + d.NoShare = config.DisableDriverMounts + d.NatNicType = defaultVirtualboxNicType + d.HostOnlyNicType = defaultVirtualboxNicType + + return d +} diff --git a/pkg/minikube/drivers/vmwarefusion/doc.go b/pkg/minikube/drivers/vmwarefusion/doc.go new file mode 100644 index 000000000000..69b2b8f8d77c --- /dev/null +++ b/pkg/minikube/drivers/vmwarefusion/doc.go @@ -0,0 +1,17 @@ +/* +Copyright 2018 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vmwarefusion diff --git a/pkg/minikube/drivers/vmwarefusion/driver.go b/pkg/minikube/drivers/vmwarefusion/driver.go new file mode 100644 index 000000000000..0e6e19841184 --- /dev/null +++ b/pkg/minikube/drivers/vmwarefusion/driver.go @@ -0,0 +1,51 @@ +// +build darwin + +/* +Copyright 2018 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package vmwarefusion + +import ( + "github.com/docker/machine/drivers/vmwarefusion" + "github.com/docker/machine/libmachine/drivers" + cfg "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/constants" + "k8s.io/minikube/pkg/minikube/registry" +) + +func init() { + registry.Register(registry.DriverDef{ + Name: "vmwarefusion", + Builtin: true, + ConfigCreator: createVMwareFusionHost, + DriverCreator: func() drivers.Driver { + return vmwarefusion.NewDriver("", "") + }, + }) +} + +func createVMwareFusionHost(config cfg.MachineConfig) interface{} { + d := vmwarefusion.NewDriver(cfg.GetMachineName(), constants.GetMinipath()).(*vmwarefusion.Driver) + d.Boot2DockerURL = config.Downloader.GetISOFileURI(config.MinikubeISO) + d.Memory = config.Memory + d.CPU = config.CPUs + d.DiskSize = config.DiskSize + + // TODO(philips): push these defaults upstream to fixup this driver + d.SSHPort = 22 + d.ISO = d.ResolveStorePath("boot2docker.iso") + return d +} diff --git a/pkg/minikube/drivers/xhyve/doc.go b/pkg/minikube/drivers/xhyve/doc.go new file mode 100644 index 000000000000..1d8c8b5ae3e8 --- /dev/null +++ b/pkg/minikube/drivers/xhyve/doc.go @@ -0,0 +1,17 @@ +/* +Copyright 2018 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package xhyve diff --git a/pkg/minikube/drivers/xhyve/driver.go b/pkg/minikube/drivers/xhyve/driver.go new file mode 100644 index 000000000000..0e16ae8c9d35 --- /dev/null +++ b/pkg/minikube/drivers/xhyve/driver.go @@ -0,0 +1,86 @@ +// +build darwin + +/* +Copyright 2018 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package xhyve + +import ( + "fmt" + "os" + + "github.com/docker/machine/libmachine/drivers" + cfg "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/constants" + "k8s.io/minikube/pkg/minikube/registry" +) + +const errMsg = ` +The Xhyve driver is not included in minikube yet. 
Please follow the directions at +https://github.com/kubernetes/minikube/blob/master/DRIVERS.md#xhyve-driver +` + +func init() { + registry.Register(registry.DriverDef{ + Name: "xhyve", + Builtin: false, + ConfigCreator: createXhyveHost, + DriverCreator: func() drivers.Driver { + fmt.Fprintln(os.Stderr, errMsg) + os.Exit(1) + return nil + }, + }) +} + +type xhyveDriver struct { + *drivers.BaseDriver + + Boot2DockerURL string + BootCmd string + CPU int + CaCertPath string + DiskSize int64 + MacAddr string + Memory int + PrivateKeyPath string + UUID string + NFSShare bool + DiskNumber int + Virtio9p bool + Virtio9pFolder string + QCow2 bool + RawDisk bool +} + +func createXhyveHost(config cfg.MachineConfig) interface{} { + useVirtio9p := !config.DisableDriverMounts + return &xhyveDriver{ + BaseDriver: &drivers.BaseDriver{ + MachineName: cfg.GetMachineName(), + StorePath: constants.GetMinipath(), + }, + Memory: config.Memory, + CPU: config.CPUs, + Boot2DockerURL: config.Downloader.GetISOFileURI(config.MinikubeISO), + BootCmd: "loglevel=3 user=docker console=ttyS0 console=tty0 noembed nomodeset norestore waitusb=10 systemd.legacy_systemd_cgroup_controller=yes base host=" + cfg.GetMachineName(), + DiskSize: int64(config.DiskSize), + Virtio9p: useVirtio9p, + Virtio9pFolder: "/Users", + QCow2: false, + RawDisk: config.XhyveDiskDriver == "virtio-blk", + } +} diff --git a/pkg/minikube/machine/cache_images.go b/pkg/minikube/machine/cache_images.go index dbfb516f05e5..0197daf9d3b7 100644 --- a/pkg/minikube/machine/cache_images.go +++ b/pkg/minikube/machine/cache_images.go @@ -59,8 +59,8 @@ func CacheImagesForBootstrapper(version string, clusterBootstrapper string) erro // CacheImages will cache images on the host // // The cache directory currently caches images using the imagename_tag -// For example, gcr.io/google-containers-kube-addon-manager:v6.5 would be -// stored at $CACHE_DIR/gcr.io/google-containers/kube-addon-manager_v6.5 +// For example, k8s.gcr.io/kube-addon-manager:v6.5 would be +// stored at $CACHE_DIR/k8s.gcr.io/kube-addon-manager_v6.5 func CacheImages(images []string, cacheDir string) error { var g errgroup.Group for _, image := range images { diff --git a/pkg/minikube/machine/client.go b/pkg/minikube/machine/client.go index 3bf8263ac9e8..2b105b9735cb 100644 --- a/pkg/minikube/machine/client.go +++ b/pkg/minikube/machine/client.go @@ -27,15 +27,16 @@ import ( "k8s.io/minikube/pkg/minikube/bootstrapper/runner" "k8s.io/minikube/pkg/minikube/constants" + "k8s.io/minikube/pkg/minikube/registry" "k8s.io/minikube/pkg/minikube/sshutil" "k8s.io/minikube/pkg/provision" - "github.com/docker/machine/drivers/virtualbox" "github.com/docker/machine/libmachine" "github.com/docker/machine/libmachine/auth" "github.com/docker/machine/libmachine/cert" "github.com/docker/machine/libmachine/check" "github.com/docker/machine/libmachine/drivers" + "github.com/docker/machine/libmachine/drivers/plugin" "github.com/docker/machine/libmachine/drivers/plugin/localbinary" "github.com/docker/machine/libmachine/engine" "github.com/docker/machine/libmachine/host" @@ -45,11 +46,10 @@ import ( "github.com/docker/machine/libmachine/state" "github.com/docker/machine/libmachine/swarm" "github.com/docker/machine/libmachine/version" + "github.com/golang/glog" "github.com/pkg/errors" ) -type driverGetter func([]byte) (drivers.Driver, error) - func NewRPCClient(storePath, certsDir string) libmachine.API { c := libmachine.NewClient(storePath, certsDir) c.SSHClientType = ssh.Native @@ -69,29 +69,6 @@ func NewAPIClient() 
(libmachine.API, error) { }, nil } -func getDriver(driverName string, rawDriver []byte) (drivers.Driver, error) { - driverGetter, ok := driverMap[driverName] - if !ok { - return nil, fmt.Errorf("Unknown driver %s for platform.", driverName) - } - driver, err := driverGetter(rawDriver) - if err != nil { - return nil, errors.Wrapf(err, "Error getting driver for %s", driverName) - } - - return driver, nil -} - -func getVirtualboxDriver(rawDriver []byte) (drivers.Driver, error) { - var driver drivers.Driver - driver = virtualbox.NewDriver("", "") - err := json.Unmarshal(rawDriver, driver) - if err != nil { - return nil, errors.Wrapf(err, "Error unmarshalling virtualbox driver %s", string(rawDriver)) - } - return driver, nil -} - // LocalClient is a non-RPC implementation // of the libmachine API type LocalClient struct { @@ -102,15 +79,21 @@ type LocalClient struct { } func (api *LocalClient) NewHost(driverName string, rawDriver []byte) (*host.Host, error) { - // If not should get Driver, use legacy - if _, ok := driverMap[driverName]; !ok { + var def registry.DriverDef + var err error + if def, err = registry.Driver(driverName); err != nil { + return nil, err + } else if !def.Builtin || def.DriverCreator == nil { return api.legacyClient.NewHost(driverName, rawDriver) } - driver, err := getDriver(driverName, rawDriver) + driver := def.DriverCreator() + + err = json.Unmarshal(rawDriver, driver) if err != nil { - return nil, errors.Wrap(err, "Error getting driver") + return nil, errors.Wrapf(err, "Error getting driver %s", string(rawDriver)) } + return &host.Host{ ConfigVersion: version.ConfigVersion, Name: driver.GetMachineName(), @@ -141,17 +124,22 @@ func (api *LocalClient) Load(name string) (*host.Host, error) { return nil, errors.Wrap(err, "Error loading host from store") } - // If not should get Driver, use legacy - if _, ok := driverMap[h.DriverName]; !ok { + var def registry.DriverDef + if def, err = registry.Driver(h.DriverName); err != nil { + return nil, err + } else if !def.Builtin || def.DriverCreator == nil { return api.legacyClient.Load(name) } - h.Driver, err = getDriver(h.DriverName, h.RawDriver) - if err != nil { - return nil, errors.Wrap(err, "Error loading driver from host") - } + h.Driver = def.DriverCreator() + return h, json.Unmarshal(h.RawDriver, h.Driver) +} - return h, nil +func (api *LocalClient) Close() error { + if api.legacyClient != nil { + return api.legacyClient.Close() + } + return nil } func GetCommandRunner(h *host.Host) (runner.CommandRunner, error) { @@ -166,16 +154,10 @@ func GetCommandRunner(h *host.Host) (runner.CommandRunner, error) { return &runner.ExecRunner{}, nil } -func (api *LocalClient) Close() error { - if api.legacyClient != nil { - return api.legacyClient.Close() - } - return nil -} - func (api *LocalClient) Create(h *host.Host) error { - - if _, ok := driverMap[h.Driver.DriverName()]; !ok { + if def, err := registry.Driver(h.DriverName); err != nil { + return err + } else if !def.Builtin || def.DriverCreator == nil { return api.legacyClient.Create(h) } @@ -276,3 +258,12 @@ func (cg *CertGenerator) ValidateCertificate(addr string, authOptions *auth.Opti return true, nil } + +func registerDriver(driverName string) { + def, err := registry.Driver(driverName) + if err != nil { + glog.Exitf("Unsupported driver: %s\n", driverName) + } + + plugin.RegisterDriver(def.DriverCreator()) +} diff --git a/pkg/minikube/machine/client_darwin.go b/pkg/minikube/machine/client_darwin.go deleted file mode 100644 index 04dfbc85b38d..000000000000 --- 
a/pkg/minikube/machine/client_darwin.go +++ /dev/null @@ -1,62 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package machine - -import ( - "encoding/json" - - "github.com/docker/machine/drivers/virtualbox" - "github.com/docker/machine/drivers/vmwarefusion" - "github.com/docker/machine/libmachine/drivers" - "github.com/docker/machine/libmachine/drivers/plugin" - "github.com/golang/glog" - "github.com/pkg/errors" -) - -var driverMap = map[string]driverGetter{ - "vmwarefusion": getVMWareFusionDriver, - "virtualbox": getVirtualboxDriver, -} - -func getVMWareFusionDriver(rawDriver []byte) (drivers.Driver, error) { - var driver drivers.Driver - driver = &vmwarefusion.Driver{} - if err := json.Unmarshal(rawDriver, &driver); err != nil { - return nil, errors.Wrap(err, "Error unmarshalling vmwarefusion driver") - } - return driver, nil -} - -// Xhyve driver not implemented yet for non-RPC access -func getXhyveDriver(rawDriver []byte) (drivers.Driver, error) { - return nil, errors.New(` -The Xhyve driver is not included in minikube yet. Please follow the directions at -https://github.com/kubernetes/minikube/blob/master/DRIVERS.md#xhyve-driver -`) -} - -// StartDriver starts the desired machine driver if necessary. -func registerDriver(driverName string) { - switch driverName { - case "virtualbox": - plugin.RegisterDriver(virtualbox.NewDriver("", "")) - case "vmwarefusion": - plugin.RegisterDriver(vmwarefusion.NewDriver("", "")) - default: - glog.Exitf("Unsupported driver: %s\n", driverName) - } -} diff --git a/pkg/minikube/machine/client_linux.go b/pkg/minikube/machine/client_linux.go deleted file mode 100644 index 306c254e4151..000000000000 --- a/pkg/minikube/machine/client_linux.go +++ /dev/null @@ -1,54 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package machine - -import ( - "encoding/json" - - "github.com/docker/machine/drivers/virtualbox" - "github.com/docker/machine/libmachine/drivers" - "github.com/docker/machine/libmachine/drivers/plugin" - "github.com/golang/glog" - "github.com/pkg/errors" - "k8s.io/minikube/pkg/drivers/none" -) - -var driverMap = map[string]driverGetter{ - "virtualbox": getVirtualboxDriver, - "none": getNoneDriver, -} - -func getNoneDriver(rawDriver []byte) (drivers.Driver, error) { - var driver drivers.Driver - driver = &none.Driver{} - if err := json.Unmarshal(rawDriver, &driver); err != nil { - return nil, errors.Wrap(err, "Error unmarshalling none driver") - } - return driver, nil -} - -// StartDriver starts the desired machine driver if necessary. -func registerDriver(driverName string) { - switch driverName { - case "virtualbox": - plugin.RegisterDriver(virtualbox.NewDriver("", "")) - case "none": - plugin.RegisterDriver(none.NewDriver("", "")) - default: - glog.Exitf("Unsupported driver: %s\n", driverName) - } -} diff --git a/pkg/minikube/machine/client_no_panic.go b/pkg/minikube/machine/client_no_panic.go deleted file mode 100644 index 87a48147a9d8..000000000000 --- a/pkg/minikube/machine/client_no_panic.go +++ /dev/null @@ -1,27 +0,0 @@ -// +build !linux,!windows,!darwin - -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package machine - -import "github.com/golang/glog" - -var driverMap = map[string]driverGetter{} - -func registerDriver(driverName string) { - glog.Errorf("Unsupported platform") -} diff --git a/pkg/minikube/machine/client_test.go b/pkg/minikube/machine/client_test.go index 0597eaf1c498..6671449181a6 100644 --- a/pkg/minikube/machine/client_test.go +++ b/pkg/minikube/machine/client_test.go @@ -18,6 +18,7 @@ package machine import ( "bufio" + "fmt" "io/ioutil" "log" "net" @@ -25,11 +26,9 @@ import ( "path/filepath" "testing" - "github.com/docker/machine/drivers/virtualbox" - "github.com/docker/machine/libmachine/drivers" - "github.com/docker/machine/libmachine/drivers/plugin/localbinary" "k8s.io/minikube/pkg/minikube/constants" + _ "k8s.io/minikube/pkg/minikube/drivers/virtualbox" ) const vboxConfig = ` @@ -63,54 +62,6 @@ const vboxConfig = ` } ` -func TestGetDriver(t *testing.T) { - var tests = []struct { - description string - driver string - rawDriver []byte - expected drivers.Driver - err bool - }{ - { - description: "vbox correct", - driver: "virtualbox", - rawDriver: []byte(vboxConfig), - expected: virtualbox.NewDriver("", ""), - }, - { - description: "unknown driver", - driver: "unknown", - rawDriver: []byte("?"), - expected: nil, - err: true, - }, - { - description: "vbox bad", - driver: "virtualbox", - rawDriver: []byte("?"), - expected: nil, - err: true, - }, - } - - for _, test := range tests { - test := test - t.Run(test.description, func(t *testing.T) { - t.Parallel() - driver, err := getDriver(test.driver, test.rawDriver) - if err != nil && !test.err { - t.Errorf("Unexpected error: %s", err) - } - if err == nil && test.err { - t.Errorf("No error returned, but expected err") - } - if driver != nil && test.expected.DriverName() != driver.DriverName() { - t.Errorf("Driver names did not match, actual: %s, expected: %s", driver.DriverName(), test.expected.DriverName()) - } - }) - } -} - func TestLocalClientNewHost(t *testing.T) { c, err := NewAPIClient() if err != nil { @@ -182,9 +133,9 @@ func TestRunNotDriver(t *testing.T) { func TestRunDriver(t *testing.T) { // This test is a bit complicated. It verifies that when the root command is // called with the proper environment variables, we setup the libmachine driver. - tempDir := makeTempDir() defer os.RemoveAll(tempDir) + os.Setenv(localbinary.PluginEnvKey, localbinary.PluginEnvVal) os.Setenv(localbinary.PluginEnvDriverName, "virtualbox") @@ -207,6 +158,8 @@ func TestRunDriver(t *testing.T) { } os.Stdout = old + fmt.Println(string(addr)) + // Now that we got the port, make sure we can connect. if _, err := net.Dial("tcp", string(addr)); err != nil { t.Fatal("Driver not listening.") diff --git a/pkg/minikube/machine/client_windows.go b/pkg/minikube/machine/client_windows.go deleted file mode 100644 index f2b50c1471d1..000000000000 --- a/pkg/minikube/machine/client_windows.go +++ /dev/null @@ -1,54 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package machine - -import ( - "encoding/json" - - "github.com/docker/machine/drivers/hyperv" - "github.com/docker/machine/drivers/virtualbox" - "github.com/docker/machine/libmachine/drivers" - "github.com/docker/machine/libmachine/drivers/plugin" - "github.com/golang/glog" - "github.com/pkg/errors" -) - -var driverMap = map[string]driverGetter{ - "hyperv": getHyperVDriver, - "virtualbox": getVirtualboxDriver, -} - -func getHyperVDriver(rawDriver []byte) (drivers.Driver, error) { - var driver drivers.Driver - driver = &hyperv.Driver{} - if err := json.Unmarshal(rawDriver, &driver); err != nil { - return nil, errors.Wrap(err, "Error unmarshalling hyperv driver") - } - return driver, nil -} - -// StartDriver starts the desired machine driver if necessary. -func registerDriver(driverName string) { - switch driverName { - case "virtualbox": - plugin.RegisterDriver(virtualbox.NewDriver("", "")) - case "hyperv": - plugin.RegisterDriver(hyperv.NewDriver("", "")) - default: - glog.Exitf("Unsupported driver: %s\n", driverName) - } -} diff --git a/pkg/minikube/node/node.go b/pkg/minikube/node/node.go index 124fa8969674..d12f5e53018d 100644 --- a/pkg/minikube/node/node.go +++ b/pkg/minikube/node/node.go @@ -9,14 +9,15 @@ import ( "k8s.io/minikube/pkg/minikube" "k8s.io/minikube/pkg/minikube/bootstrapper/runner" "k8s.io/minikube/pkg/minikube/cluster" + cfg "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/sshutil" "k8s.io/minikube/pkg/util" ) func NewNode( - config minikube.NodeConfig, - baseConfig cluster.MachineConfig, + config cfg.NodeConfig, + baseConfig cfg.MachineConfig, clusterName string, api libmachine.API, ) minikube.Node { @@ -30,13 +31,15 @@ func NewNode( type node struct { api libmachine.API - config minikube.NodeConfig - baseConfig cluster.MachineConfig + config cfg.NodeConfig + baseConfig cfg.MachineConfig clusterName string } func (n *node) Config() minikube.NodeConfig { - return n.config + var c minikube.NodeConfig + c.Name = n.config.Name + return c } func (n *node) IP() (string, error) { @@ -91,7 +94,7 @@ func (n *node) Runner() (runner.CommandRunner, error) { return runner.NewSSHRunner(client), nil } -func (n *node) machineConfig() cluster.MachineConfig { +func (n *node) machineConfig() cfg.MachineConfig { cfg := n.baseConfig cfg.Downloader = util.DefaultDownloader{} cfg.MachineName = n.MachineName() diff --git a/pkg/minikube/registry/doc.go b/pkg/minikube/registry/doc.go new file mode 100644 index 000000000000..0ff8e0121e5b --- /dev/null +++ b/pkg/minikube/registry/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2018 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package contains the registry to enable a docker machine driver to be used +// in minikube. 
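+//
+// Drivers register themselves at startup: each package under
+// pkg/minikube/drivers calls Register from an init function, and the blank
+// imports in pkg/minikube/cluster/default_drivers.go pull those packages in.
+// For illustration only (the names below are hypothetical), a registration
+// looks like:
+//
+//	func init() {
+//		registry.Register(registry.DriverDef{
+//			Name:          "example",
+//			Builtin:       true,
+//			ConfigCreator: createExampleHost,
+//		})
+//	}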
+ +package registry diff --git a/pkg/minikube/registry/registry.go b/pkg/minikube/registry/registry.go new file mode 100644 index 000000000000..dec9e2ba297d --- /dev/null +++ b/pkg/minikube/registry/registry.go @@ -0,0 +1,138 @@ +/* +Copyright 2018 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package registry + +import ( + "fmt" + "sync" + + "github.com/docker/machine/libmachine/drivers" + "github.com/pkg/errors" + "k8s.io/minikube/pkg/minikube/config" +) + +var ( + // ErrDriverNameExist is the error returned when trying to register a driver + // which already exists in the registry + ErrDriverNameExist = errors.New("registry: duplicated driver name") + + // ErrDriverNotFound is the error returned when a driver of a given name does + // not exist in the registry + ErrDriverNotFound = errors.New("registry: driver not found") +) + +// Registry contains all the supported driver definitions on the host +type Registry interface { + // Register adds a driver definition to the registry + Register(driver DriverDef) error + + // Driver returns the registered driver with the given name + Driver(name string) (DriverDef, error) + + // List returns all registered driver definitions + List() []DriverDef +} + +// ConfigFactory is a function that creates a driver config from MachineConfig +type ConfigFactory func(config.MachineConfig) interface{} + +// DriverFactory is a function that creates a machine driver instance +type DriverFactory func() drivers.Driver + +// DriverDef defines the metadata of a machine driver. It tells minikube how to +// initialize and load a driver. +type DriverDef struct { + // Name of the machine driver. It has to be unique. + Name string + + // Builtin indicates whether the driver is built into the minikube binary, or + // is triggered through RPC as a separate plugin process. + Builtin bool + + // ConfigCreator generates a raw driver object from minikube's machine config. + ConfigCreator ConfigFactory + + // DriverCreator is the factory method that creates a machine driver instance.
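+	// It is consulted only when Builtin is true; for non-builtin drivers, or
+	// when DriverCreator is nil, minikube falls back to the legacy RPC plugin
+	// client (see LocalClient.NewHost in pkg/minikube/machine/client.go).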
+ DriverCreator DriverFactory +} + +func (d DriverDef) String() string { + return fmt.Sprintf("{name: %s, builtin: %t}", d.Name, d.Builtin) +} + +type driverRegistry struct { + drivers map[string]DriverDef + lock sync.Mutex +} + +func createRegistry() *driverRegistry { + return &driverRegistry{ + drivers: make(map[string]DriverDef), + } +} + +var ( + registry = createRegistry() +) + +func ListDrivers() []DriverDef { + return registry.List() +} + +func Register(driver DriverDef) error { + return registry.Register(driver) +} + +func Driver(name string) (DriverDef, error) { + return registry.Driver(name) +} + +func (r *driverRegistry) Register(def DriverDef) error { + r.lock.Lock() + defer r.lock.Unlock() + + if _, ok := r.drivers[def.Name]; ok { + return ErrDriverNameExist + } + + r.drivers[def.Name] = def + return nil +} + +func (r *driverRegistry) List() []DriverDef { + r.lock.Lock() + defer r.lock.Unlock() + + result := make([]DriverDef, 0, len(r.drivers)) + + for _, def := range r.drivers { + result = append(result, def) + } + + return result +} + +func (r *driverRegistry) Driver(name string) (DriverDef, error) { + r.lock.Lock() + defer r.lock.Unlock() + + if driver, ok := r.drivers[name]; ok { + return driver, nil + } + + return DriverDef{}, ErrDriverNotFound +} diff --git a/pkg/minikube/registry/registry_test.go b/pkg/minikube/registry/registry_test.go new file mode 100644 index 000000000000..91676bd9eac0 --- /dev/null +++ b/pkg/minikube/registry/registry_test.go @@ -0,0 +1,79 @@ +/* +Copyright 2018 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package registry + +import ( + "testing" + + "k8s.io/minikube/pkg/minikube/config" +) + +func TestRegistry(t *testing.T) { + foo := DriverDef{ + Name: "foo", + Builtin: true, + ConfigCreator: func(_ config.MachineConfig) interface{} { + return nil + }, + } + bar := DriverDef{ + Name: "bar", + Builtin: true, + ConfigCreator: func(_ config.MachineConfig) interface{} { + return nil + }, + } + + registry := createRegistry() + + err := registry.Register(foo) + if err != nil { + t.Fatal("expect nil") + } + + err = registry.Register(foo) + if err != ErrDriverNameExist { + t.Fatal("expect ErrDriverNameExist") + } + + err = registry.Register(bar) + if err != nil { + t.Fatal("expect nil") + } + + list := registry.List() + if len(list) != 2 { + t.Fatalf("expect len(list) to be %d; got %d", 2, len(list)) + } + + if !(list[0].Name == "bar" && list[1].Name == "foo" || list[0].Name == "foo" && list[1].Name == "bar") { + t.Fatalf("expect registry.List return %s; got %s", []string{"bar", "foo"}, list) + } + + driver, err := registry.Driver("foo") + if err != nil { + t.Fatal("expect nil") + } + if driver.Name != "foo" { + t.Fatal("expect registry.Driver(foo) returns registered driver") + } + + driver, err = registry.Driver("foo2") + if err != ErrDriverNotFound { + t.Fatal("expect ErrDriverNotFound") + } +} diff --git a/pkg/minikube/service/service.go b/pkg/minikube/service/service.go index b45972871c7c..e224a2274a19 100644 --- a/pkg/minikube/service/service.go +++ b/pkg/minikube/service/service.go @@ -28,7 +28,7 @@ import ( "github.com/pkg/browser" "github.com/pkg/errors" "k8s.io/api/core/v1" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/tools/clientcmd" @@ -114,7 +114,7 @@ func GetServiceURLs(api libmachine.API, namespace string, t *template.Template) serviceInterface := client.Services(namespace) - svcs, err := serviceInterface.List(meta_v1.ListOptions{}) + svcs, err := serviceInterface.List(metav1.ListOptions{}) if err != nil { return nil, err } @@ -158,7 +158,7 @@ func printURLsForService(c corev1.CoreV1Interface, ip, service, namespace string } s := c.Services(namespace) - svc, err := s.Get(service, meta_v1.GetOptions{}) + svc, err := s.Get(service, metav1.GetOptions{}) if err != nil { return nil, errors.Wrapf(err, "service '%s' could not be found running", service) } @@ -211,14 +211,14 @@ func CheckService(namespace string, service string) error { } func validateService(s corev1.ServiceInterface, service string) error { - if _, err := s.Get(service, meta_v1.GetOptions{}); err != nil { + if _, err := s.Get(service, metav1.GetOptions{}); err != nil { return errors.Wrapf(err, "Error getting service %s", service) } return nil } func checkEndpointReady(endpoints corev1.EndpointsInterface, service string) error { - endpoint, err := endpoints.Get(service, meta_v1.GetOptions{}) + endpoint, err := endpoints.Get(service, metav1.GetOptions{}) if err != nil { return &util.RetriableError{Err: errors.Errorf("Error getting endpoints for service %s", service)} } @@ -274,7 +274,7 @@ func GetServiceListByLabel(namespace string, key string, value string) (*v1.Serv func getServiceListFromServicesByLabel(services corev1.ServiceInterface, key string, value string) (*v1.ServiceList, error) { selector := labels.SelectorFromSet(labels.Set(map[string]string{key: value})) - serviceList, err := services.List(meta_v1.ListOptions{LabelSelector: selector.String()}) + 
serviceList, err := services.List(metav1.ListOptions{LabelSelector: selector.String()}) if err != nil { return &v1.ServiceList{}, &util.RetriableError{Err: err} } @@ -293,7 +293,7 @@ func CreateSecret(namespace, name string, dataValues map[string]string, labels m return &util.RetriableError{Err: err} } - secret, _ := secrets.Get(name, meta_v1.GetOptions{}) + secret, _ := secrets.Get(name, metav1.GetOptions{}) // Delete existing secret if len(secret.Name) > 0 { @@ -311,7 +311,7 @@ func CreateSecret(namespace, name string, dataValues map[string]string, labels m // Create Secret secretObj := &v1.Secret{ - ObjectMeta: meta_v1.ObjectMeta{ + ObjectMeta: metav1.ObjectMeta{ Name: name, Labels: labels, }, @@ -340,7 +340,7 @@ func DeleteSecret(namespace, name string) error { return &util.RetriableError{Err: err} } - err = secrets.Delete(name, &meta_v1.DeleteOptions{}) + err = secrets.Delete(name, &metav1.DeleteOptions{}) if err != nil { return &util.RetriableError{Err: err} } diff --git a/pkg/minikube/service/service_test.go b/pkg/minikube/service/service_test.go index d89adeb81f7e..d4bd77b1a976 100644 --- a/pkg/minikube/service/service_test.go +++ b/pkg/minikube/service/service_test.go @@ -26,7 +26,7 @@ import ( "github.com/docker/machine/libmachine/host" "github.com/pkg/errors" "k8s.io/api/core/v1" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/kubernetes/typed/core/v1/fake" @@ -61,7 +61,7 @@ var defaultNamespaceServiceInterface = &MockServiceInterface{ ServiceList: &v1.ServiceList{ Items: []v1.Service{ { - ObjectMeta: meta_v1.ObjectMeta{ + ObjectMeta: metav1.ObjectMeta{ Name: "mock-dashboard", Namespace: "default", }, @@ -73,7 +73,7 @@ var defaultNamespaceServiceInterface = &MockServiceInterface{ }, }, { - ObjectMeta: meta_v1.ObjectMeta{ + ObjectMeta: metav1.ObjectMeta{ Name: "mock-dashboard-no-ports", Namespace: "default", }, @@ -125,7 +125,7 @@ var endpointMap = map[string]*v1.Endpoints{ }, } -func (e MockEndpointsInterface) Get(name string, _ meta_v1.GetOptions) (*v1.Endpoints, error) { +func (e MockEndpointsInterface) Get(name string, _ metav1.GetOptions) (*v1.Endpoints, error) { endpoint, ok := endpointMap[name] if !ok { return nil, errors.New("Endpoint not found") @@ -176,7 +176,7 @@ type MockServiceInterface struct { ServiceList *v1.ServiceList } -func (s MockServiceInterface) List(opts meta_v1.ListOptions) (*v1.ServiceList, error) { +func (s MockServiceInterface) List(opts metav1.ListOptions) (*v1.ServiceList, error) { serviceList := &v1.ServiceList{ Items: []v1.Service{}, } @@ -195,7 +195,7 @@ func (s MockServiceInterface) List(opts meta_v1.ListOptions) (*v1.ServiceList, e return s.ServiceList, nil } -func (s MockServiceInterface) Get(name string, _ meta_v1.GetOptions) (*v1.Service, error) { +func (s MockServiceInterface) Get(name string, _ metav1.GetOptions) (*v1.Service, error) { for _, svc := range s.ServiceList.Items { if svc.ObjectMeta.Name == name { return &svc, nil diff --git a/pkg/minikube/storageclass/storageclass.go b/pkg/minikube/storageclass/storageclass.go index 669117c728ec..45dffd45c572 100644 --- a/pkg/minikube/storageclass/storageclass.go +++ b/pkg/minikube/storageclass/storageclass.go @@ -18,7 +18,7 @@ package storageclass import ( "github.com/pkg/errors" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/clientcmd" @@ -39,7 
+39,7 @@ func DisableDefaultStorageClass() error { return errors.Wrap(err, "Error creating new client from kubeConfig.ClientConfig()") } - err = client.Storage().StorageClasses().Delete(constants.DefaultStorageClassProvisioner, &meta_v1.DeleteOptions{}) + err = client.Storage().StorageClasses().Delete(constants.DefaultStorageClassProvisioner, &metav1.DeleteOptions{}) if err != nil { return errors.Wrapf(err, "Error deleting default storage class %s", constants.DefaultStorageClassProvisioner) } diff --git a/pkg/provision/buildroot.go b/pkg/provision/buildroot.go old mode 100755 new mode 100644 index 6f5b918b3b65..edb0e78f6033 --- a/pkg/provision/buildroot.go +++ b/pkg/provision/buildroot.go @@ -201,12 +201,14 @@ CRIO_MINIKUBE_OPTIONS='{{ range .EngineOptions.InsecureRegistry }}--insecure-reg if err := t.Execute(&crioOptsBuf, p); err != nil { return err } + if _, err = p.SSHCommand(fmt.Sprintf("sudo mkdir -p %s && printf %%s \"%s\" | sudo tee %s", path.Dir(crioOptsPath), crioOptsBuf.String(), crioOptsPath)); err != nil { return err } + // This is unlikely to cause issues unless the user has explicitly requested CRIO, so just log a warning. if err := p.Service("crio", serviceaction.Restart); err != nil { - return err + log.Warnf("Unable to restart crio service. Error: %s", err) } return nil @@ -282,7 +284,7 @@ func configureAuth(p *BuildrootProvisioner) error { return errors.Wrapf(err, "error copying %s to %s", src, dst) } if err := sshRunner.Copy(f); err != nil { - return errors.Wrapf(err, "transfering file to machine %v", f) + return errors.Wrapf(err, "transferring file to machine %v", f) } } @@ -297,6 +299,10 @@ func configureAuth(p *BuildrootProvisioner) error { return err } + if err := p.Service("docker", serviceaction.Enable); err != nil { + return err + } + if err := p.Service("docker", serviceaction.Restart); err != nil { return err } diff --git a/pkg/util/constants.go b/pkg/util/constants.go index 16f26d37de39..f7e5051a733d 100644 --- a/pkg/util/constants.go +++ b/pkg/util/constants.go @@ -32,6 +32,19 @@ const ( DefaultServiceCIDR = "10.96.0.0/12" ) +var DefaultAdmissionControllers = []string{ + "Initializers", + "NamespaceLifecycle", + "LimitRanger", + "ServiceAccount", + "DefaultStorageClass", + "DefaultTolerationSeconds", + "NodeRestriction", + "MutatingAdmissionWebhook", + "ValidatingAdmissionWebhook", + "ResourceQuota", +} + // GetServiceClusterIP returns the first IP of the ServiceCIDR func GetServiceClusterIP(serviceCIDR string) (net.IP, error) { ip, _, err := net.ParseCIDR(serviceCIDR) diff --git a/pkg/util/kubernetes.go b/pkg/util/kubernetes.go old mode 100755 new mode 100644 diff --git a/pkg/util/utils.go b/pkg/util/utils.go index 644ff8384fc8..49a7b66e0374 100644 --- a/pkg/util/utils.go +++ b/pkg/util/utils.go @@ -38,6 +38,10 @@ import ( "k8s.io/minikube/pkg/version" ) +const ( + downloadURL = "https://storage.googleapis.com/minikube/releases/%s/minikube-%s-amd64%s" +) + type RetriableError struct { Err error } @@ -155,6 +159,15 @@ func ParseSHAFromURL(url string) (string, error) { return strings.Trim(string(body), "\n"), nil } +func GetBinaryDownloadURL(version, platform string) string { + switch platform { + case "windows": + return fmt.Sprintf(downloadURL, version, platform, ".exe") + default: + return fmt.Sprintf(downloadURL, version, platform, "") + } +} + type MultiError struct { Errors []error } diff --git a/pkg/util/utils_test.go b/pkg/util/utils_test.go index 42a2a9eac0e9..899e96430ed2 100644 --- a/pkg/util/utils_test.go +++ b/pkg/util/utils_test.go @@ -162,3
+162,23 @@ Error 2` t.Fatalf("Unexpected error: %s", err) } } + +func TestGetBinaryDownloadURL(t *testing.T) { + testData := []struct { + version string + platform string + expectedURL string + }{ + {"v0.0.1", "linux", "https://storage.googleapis.com/minikube/releases/v0.0.1/minikube-linux-amd64"}, + {"v0.0.1", "darwin", "https://storage.googleapis.com/minikube/releases/v0.0.1/minikube-darwin-amd64"}, + {"v0.0.1", "windows", "https://storage.googleapis.com/minikube/releases/v0.0.1/minikube-windows-amd64.exe"}, + } + + for _, tt := range testData { + url := GetBinaryDownloadURL(tt.version, tt.platform) + if url != tt.expectedURL { + t.Fatalf("Expected '%s' but got '%s'", tt.expectedURL, url) + } + } + +} diff --git a/test/integration/docker_test.go b/test/integration/docker_test.go old mode 100755 new mode 100644 diff --git a/test/integration/functional_test.go b/test/integration/functional_test.go old mode 100755 new mode 100644 diff --git a/test/integration/start_stop_delete_test.go b/test/integration/start_stop_delete_test.go old mode 100755 new mode 100644 diff --git a/third_party/go9p/clnt_mount.go b/third_party/go9p/clnt_mount.go index 1c7a754c62a8..0321d76b6488 100644 --- a/third_party/go9p/clnt_mount.go +++ b/third_party/go9p/clnt_mount.go @@ -84,7 +84,7 @@ func MountConn(c net.Conn, aname string, msize uint32, user User) (*Clnt, error) return clnt, nil } -// Closes the connection to the file sever. +// Closes the connection to the file server. func (clnt *Clnt) Unmount() { clnt.Lock() clnt.err = &Error{"connection closed", EIO} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/availabilitysets.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/availabilitysets.go index bf1a8fc34e38..76c20408fb13 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/availabilitysets.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/availabilitysets.go @@ -90,7 +90,9 @@ func (client AvailabilitySetsClient) CreateOrUpdatePreparer(resourceGroupName st // CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the // http.Response Body if it receives an error. func (client AvailabilitySetsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always @@ -155,7 +157,9 @@ func (client AvailabilitySetsClient) DeletePreparer(resourceGroupName string, av // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client AvailabilitySetsClient) DeleteSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // DeleteResponder handles the response to the Delete request. The method always @@ -220,7 +224,9 @@ func (client AvailabilitySetsClient) GetPreparer(resourceGroupName string, avail // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client AvailabilitySetsClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. 
The method always @@ -284,7 +290,9 @@ func (client AvailabilitySetsClient) ListPreparer(resourceGroupName string) (*ht // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client AvailabilitySetsClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always @@ -350,7 +358,9 @@ func (client AvailabilitySetsClient) ListAvailableSizesPreparer(resourceGroupNam // ListAvailableSizesSender sends the ListAvailableSizes request. The method will close the // http.Response Body if it receives an error. func (client AvailabilitySetsClient) ListAvailableSizesSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListAvailableSizesResponder handles the response to the ListAvailableSizes request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/client.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/client.go index 98b1c0dad648..93a44f2f20ba 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/client.go @@ -1,6 +1,8 @@ // Package compute implements the Azure ARM Compute service API version . // // Compute Client +// +// Deprecated: Please instead use github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-03-30/compute package compute // Copyright (c) Microsoft and contributors. All rights reserved. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/containerservices.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/containerservices.go index 98c1706eef7b..a09f4c3357f3 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/containerservices.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/containerservices.go @@ -144,6 +144,7 @@ func (client ContainerServicesClient) CreateOrUpdatePreparer(resourceGroupName s func (client ContainerServicesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -229,6 +230,7 @@ func (client ContainerServicesClient) DeletePreparer(resourceGroupName string, c func (client ContainerServicesClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -296,7 +298,9 @@ func (client ContainerServicesClient) GetPreparer(resourceGroupName string, cont // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client ContainerServicesClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -358,7 +362,9 @@ func (client ContainerServicesClient) ListPreparer() (*http.Request, error) { // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. 
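From here to the end of the section the vendored go-autorest changes repeat a single pattern: every generated *Sender gains azure.DoRetryWithRegistration(client.Client), and the long-running operations stack it ahead of azure.DoPollForAsynchronous. DoRetryWithRegistration is a SendDecorator that, as its name suggests, retries a request after registering the subscription with the target resource provider when ARM rejects it for a missing registration. A SendDecorator is just a function that wraps one autorest.Sender in another; the toy logging decorator below is invented here to show the shape and is not part of the SDK:

package main

import (
	"log"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

// withLogging has the same shape as azure.DoRetryWithRegistration:
// it returns a SendDecorator that wraps the next Sender in the chain.
func withLogging() autorest.SendDecorator {
	return func(s autorest.Sender) autorest.Sender {
		return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) {
			log.Printf("sending %s %s", r.Method, r.URL)
			return s.Do(r) // delegate to the wrapped Sender
		})
	}
}

Inside a generated sender the decorators compose in order, e.g. autorest.SendWithSender(client, req, withLogging(), azure.DoRetryWithRegistration(client.Client)).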
func (client ContainerServicesClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always @@ -493,7 +499,9 @@ func (client ContainerServicesClient) ListByResourceGroupPreparer(resourceGroupN // ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the // http.Response Body if it receives an error. func (client ContainerServicesClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/disks.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/disks.go index 47e9e4029ce7..18efc73b229d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/disks.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/disks.go @@ -131,6 +131,7 @@ func (client DisksClient) CreateOrUpdatePreparer(resourceGroupName string, diskN func (client DisksClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -213,6 +214,7 @@ func (client DisksClient) DeletePreparer(resourceGroupName string, diskName stri func (client DisksClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -279,7 +281,9 @@ func (client DisksClient) GetPreparer(resourceGroupName string, diskName string) // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client DisksClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -372,6 +376,7 @@ func (client DisksClient) GrantAccessPreparer(resourceGroupName string, diskName func (client DisksClient) GrantAccessSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -433,7 +438,9 @@ func (client DisksClient) ListPreparer() (*http.Request, error) { // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client DisksClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always @@ -566,7 +573,9 @@ func (client DisksClient) ListByResourceGroupPreparer(resourceGroupName string) // ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the // http.Response Body if it receives an error. 
func (client DisksClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always @@ -717,6 +726,7 @@ func (client DisksClient) RevokeAccessPreparer(resourceGroupName string, diskNam func (client DisksClient) RevokeAccessSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -801,6 +811,7 @@ func (client DisksClient) UpdatePreparer(resourceGroupName string, diskName stri func (client DisksClient) UpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/images.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/images.go index bbde2bc7f95f..6a5c06ad1535 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/images.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/images.go @@ -119,6 +119,7 @@ func (client ImagesClient) CreateOrUpdatePreparer(resourceGroupName string, imag func (client ImagesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -200,6 +201,7 @@ func (client ImagesClient) DeletePreparer(resourceGroupName string, imageName st func (client ImagesClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -269,7 +271,9 @@ func (client ImagesClient) GetPreparer(resourceGroupName string, imageName strin // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client ImagesClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -331,7 +335,9 @@ func (client ImagesClient) ListPreparer() (*http.Request, error) { // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client ImagesClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always @@ -464,7 +470,9 @@ func (client ImagesClient) ListByResourceGroupPreparer(resourceGroupName string) // ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the // http.Response Body if it receives an error. 
func (client ImagesClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/resourceskus.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/resourceskus.go index 9dd7fffa8564..c92374bfaa5f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/resourceskus.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/resourceskus.go @@ -83,7 +83,9 @@ func (client ResourceSkusClient) ListPreparer() (*http.Request, error) { // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client ResourceSkusClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/snapshots.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/snapshots.go index 2c7135292156..2c77c51ce8e0 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/snapshots.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/snapshots.go @@ -131,6 +131,7 @@ func (client SnapshotsClient) CreateOrUpdatePreparer(resourceGroupName string, s func (client SnapshotsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -213,6 +214,7 @@ func (client SnapshotsClient) DeletePreparer(resourceGroupName string, snapshotN func (client SnapshotsClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -279,7 +281,9 @@ func (client SnapshotsClient) GetPreparer(resourceGroupName string, snapshotName // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client SnapshotsClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -373,6 +377,7 @@ func (client SnapshotsClient) GrantAccessPreparer(resourceGroupName string, snap func (client SnapshotsClient) GrantAccessSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -434,7 +439,9 @@ func (client SnapshotsClient) ListPreparer() (*http.Request, error) { // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client SnapshotsClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. 
The method always @@ -567,7 +574,9 @@ func (client SnapshotsClient) ListByResourceGroupPreparer(resourceGroupName stri // ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the // http.Response Body if it receives an error. func (client SnapshotsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always @@ -718,6 +727,7 @@ func (client SnapshotsClient) RevokeAccessPreparer(resourceGroupName string, sna func (client SnapshotsClient) RevokeAccessSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -802,6 +812,7 @@ func (client SnapshotsClient) UpdatePreparer(resourceGroupName string, snapshotN func (client SnapshotsClient) UpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/usage.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/usage.go index 86e9ebabf3b7..ddf8917e3cbe 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/usage.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/usage.go @@ -94,7 +94,9 @@ func (client UsageClient) ListPreparer(location string) (*http.Request, error) { // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client UsageClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/version.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/version.go index 2f9cac6f3bc7..00a3106b8812 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/version.go @@ -19,10 +19,10 @@ package compute // UserAgent returns the UserAgent string to use when sending http.Requests. func UserAgent() string { - return "Azure-SDK-For-Go/v11.0.0-beta arm-compute/" + return "Azure-SDK-For-Go/v12.4.0-beta arm-compute/" } // Version returns the semantic version (see http://semver.org) of the client. func Version() string { - return "v11.0.0-beta" + return "v12.4.0-beta" } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensionimages.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensionimages.go index c486f63ced50..2a9a4d28930f 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensionimages.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensionimages.go @@ -90,7 +90,9 @@ func (client VirtualMachineExtensionImagesClient) GetPreparer(location string, p // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. 
func (client VirtualMachineExtensionImagesClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -155,7 +157,9 @@ func (client VirtualMachineExtensionImagesClient) ListTypesPreparer(location str // ListTypesSender sends the ListTypes request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineExtensionImagesClient) ListTypesSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListTypesResponder handles the response to the ListTypes request. The method always @@ -230,7 +234,9 @@ func (client VirtualMachineExtensionImagesClient) ListVersionsPreparer(location // ListVersionsSender sends the ListVersions request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineExtensionImagesClient) ListVersionsSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListVersionsResponder handles the response to the ListVersions request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensions.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensions.go index e8f0b27f1b00..4b8af16c9018 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensions.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensions.go @@ -109,6 +109,7 @@ func (client VirtualMachineExtensionsClient) CreateOrUpdatePreparer(resourceGrou func (client VirtualMachineExtensionsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -192,6 +193,7 @@ func (client VirtualMachineExtensionsClient) DeletePreparer(resourceGroupName st func (client VirtualMachineExtensionsClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -263,7 +265,9 @@ func (client VirtualMachineExtensionsClient) GetPreparer(resourceGroupName strin // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineExtensionsClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. 
The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineimages.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineimages.go index 9eda416e9584..2e1e20b04424 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineimages.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineimages.go @@ -91,7 +91,9 @@ func (client VirtualMachineImagesClient) GetPreparer(location string, publisherN // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineImagesClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -168,7 +170,9 @@ func (client VirtualMachineImagesClient) ListPreparer(location string, publisher // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineImagesClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always @@ -233,7 +237,9 @@ func (client VirtualMachineImagesClient) ListOffersPreparer(location string, pub // ListOffersSender sends the ListOffers request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineImagesClient) ListOffersSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListOffersResponder handles the response to the ListOffers request. The method always @@ -297,7 +303,9 @@ func (client VirtualMachineImagesClient) ListPublishersPreparer(location string) // ListPublishersSender sends the ListPublishers request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineImagesClient) ListPublishersSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListPublishersResponder handles the response to the ListPublishers request. The method always @@ -364,7 +372,9 @@ func (client VirtualMachineImagesClient) ListSkusPreparer(location string, publi // ListSkusSender sends the ListSkus request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineImagesClient) ListSkusSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListSkusResponder handles the response to the ListSkus request. 
The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineruncommands.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineruncommands.go index f422c92f58c0..287566a6a98f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineruncommands.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineruncommands.go @@ -94,7 +94,9 @@ func (client VirtualMachineRunCommandsClient) GetPreparer(location string, comma // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineRunCommandsClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -164,7 +166,9 @@ func (client VirtualMachineRunCommandsClient) ListPreparer(location string) (*ht // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineRunCommandsClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachines.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachines.go index ad1d829d14dc..086463240a42 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachines.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachines.go @@ -119,6 +119,7 @@ func (client VirtualMachinesClient) CapturePreparer(resourceGroupName string, VM func (client VirtualMachinesClient) CaptureSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -201,6 +202,7 @@ func (client VirtualMachinesClient) ConvertToManagedDisksPreparer(resourceGroupN func (client VirtualMachinesClient) ConvertToManagedDisksSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -310,6 +312,7 @@ func (client VirtualMachinesClient) CreateOrUpdatePreparer(resourceGroupName str func (client VirtualMachinesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -392,6 +395,7 @@ func (client VirtualMachinesClient) DeallocatePreparer(resourceGroupName string, func (client VirtualMachinesClient) DeallocateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -473,6 +477,7 @@ func (client VirtualMachinesClient) DeletePreparer(resourceGroupName string, VMN func (client VirtualMachinesClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } 
@@ -538,7 +543,9 @@ func (client VirtualMachinesClient) GeneralizePreparer(resourceGroupName string, // GeneralizeSender sends the Generalize request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachinesClient) GeneralizeSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GeneralizeResponder handles the response to the Generalize request. The method always @@ -607,7 +614,9 @@ func (client VirtualMachinesClient) GetPreparer(resourceGroupName string, VMName // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachinesClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -672,7 +681,9 @@ func (client VirtualMachinesClient) InstanceViewPreparer(resourceGroupName strin // InstanceViewSender sends the InstanceView request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachinesClient) InstanceViewSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // InstanceViewResponder handles the response to the InstanceView request. The method always @@ -737,7 +748,9 @@ func (client VirtualMachinesClient) ListPreparer(resourceGroupName string) (*htt // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachinesClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always @@ -868,7 +881,9 @@ func (client VirtualMachinesClient) ListAllPreparer() (*http.Request, error) { // ListAllSender sends the ListAll request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachinesClient) ListAllSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListAllResponder handles the response to the ListAll request. The method always @@ -1002,7 +1017,9 @@ func (client VirtualMachinesClient) ListAvailableSizesPreparer(resourceGroupName // ListAvailableSizesSender sends the ListAvailableSizes request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachinesClient) ListAvailableSizesSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListAvailableSizesResponder handles the response to the ListAvailableSizes request. 
The method always @@ -1084,6 +1101,7 @@ func (client VirtualMachinesClient) PerformMaintenancePreparer(resourceGroupName func (client VirtualMachinesClient) PerformMaintenanceSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -1167,6 +1185,7 @@ func (client VirtualMachinesClient) PowerOffPreparer(resourceGroupName string, V func (client VirtualMachinesClient) PowerOffSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -1249,6 +1268,7 @@ func (client VirtualMachinesClient) RedeployPreparer(resourceGroupName string, V func (client VirtualMachinesClient) RedeploySender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -1330,6 +1350,7 @@ func (client VirtualMachinesClient) RestartPreparer(resourceGroupName string, VM func (client VirtualMachinesClient) RestartSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -1423,6 +1444,7 @@ func (client VirtualMachinesClient) RunCommandPreparer(resourceGroupName string, func (client VirtualMachinesClient) RunCommandSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -1504,6 +1526,7 @@ func (client VirtualMachinesClient) StartPreparer(resourceGroupName string, VMNa func (client VirtualMachinesClient) StartSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesetextensions.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesetextensions.go index 9a198f148d20..882339202b15 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesetextensions.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesetextensions.go @@ -110,6 +110,7 @@ func (client VirtualMachineScaleSetExtensionsClient) CreateOrUpdatePreparer(reso func (client VirtualMachineScaleSetExtensionsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -193,6 +194,7 @@ func (client VirtualMachineScaleSetExtensionsClient) DeletePreparer(resourceGrou func (client VirtualMachineScaleSetExtensionsClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -264,7 +266,9 @@ func (client VirtualMachineScaleSetExtensionsClient) GetPreparer(resourceGroupNa // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. 
func (client VirtualMachineScaleSetExtensionsClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -330,7 +334,9 @@ func (client VirtualMachineScaleSetExtensionsClient) ListPreparer(resourceGroupN // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetExtensionsClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesetrollingupgrades.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesetrollingupgrades.go index 45cc6e5c51cb..230b5da939c1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesetrollingupgrades.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesetrollingupgrades.go @@ -106,6 +106,7 @@ func (client VirtualMachineScaleSetRollingUpgradesClient) CancelPreparer(resourc func (client VirtualMachineScaleSetRollingUpgradesClient) CancelSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -171,7 +172,9 @@ func (client VirtualMachineScaleSetRollingUpgradesClient) GetLatestPreparer(reso // GetLatestSender sends the GetLatest request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetRollingUpgradesClient) GetLatestSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetLatestResponder handles the response to the GetLatest request. 
The method always @@ -254,6 +257,7 @@ func (client VirtualMachineScaleSetRollingUpgradesClient) StartOSUpgradePreparer func (client VirtualMachineScaleSetRollingUpgradesClient) StartOSUpgradeSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesets.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesets.go index 72a868579acb..64c5c6d7a17f 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesets.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesets.go @@ -133,6 +133,7 @@ func (client VirtualMachineScaleSetsClient) CreateOrUpdatePreparer(resourceGroup func (client VirtualMachineScaleSetsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -222,6 +223,7 @@ func (client VirtualMachineScaleSetsClient) DeallocatePreparer(resourceGroupName func (client VirtualMachineScaleSetsClient) DeallocateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -303,6 +305,7 @@ func (client VirtualMachineScaleSetsClient) DeletePreparer(resourceGroupName str func (client VirtualMachineScaleSetsClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -397,6 +400,7 @@ func (client VirtualMachineScaleSetsClient) DeleteInstancesPreparer(resourceGrou func (client VirtualMachineScaleSetsClient) DeleteInstancesSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -462,7 +466,9 @@ func (client VirtualMachineScaleSetsClient) GetPreparer(resourceGroupName string // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetsClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -527,7 +533,9 @@ func (client VirtualMachineScaleSetsClient) GetInstanceViewPreparer(resourceGrou // GetInstanceViewSender sends the GetInstanceView request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetsClient) GetInstanceViewSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetInstanceViewResponder handles the response to the GetInstanceView request. The method always @@ -591,7 +599,9 @@ func (client VirtualMachineScaleSetsClient) ListPreparer(resourceGroupName strin // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. 
func (client VirtualMachineScaleSetsClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always @@ -723,7 +733,9 @@ func (client VirtualMachineScaleSetsClient) ListAllPreparer() (*http.Request, er // ListAllSender sends the ListAll request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetsClient) ListAllSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListAllResponder handles the response to the ListAll request. The method always @@ -858,7 +870,9 @@ func (client VirtualMachineScaleSetsClient) ListSkusPreparer(resourceGroupName s // ListSkusSender sends the ListSkus request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetsClient) ListSkusSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListSkusResponder handles the response to the ListSkus request. The method always @@ -1016,6 +1030,7 @@ func (client VirtualMachineScaleSetsClient) PowerOffPreparer(resourceGroupName s func (client VirtualMachineScaleSetsClient) PowerOffSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -1104,6 +1119,7 @@ func (client VirtualMachineScaleSetsClient) ReimagePreparer(resourceGroupName st func (client VirtualMachineScaleSetsClient) ReimageSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -1192,6 +1208,7 @@ func (client VirtualMachineScaleSetsClient) ReimageAllPreparer(resourceGroupName func (client VirtualMachineScaleSetsClient) ReimageAllSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -1280,6 +1297,7 @@ func (client VirtualMachineScaleSetsClient) RestartPreparer(resourceGroupName st func (client VirtualMachineScaleSetsClient) RestartSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -1368,6 +1386,7 @@ func (client VirtualMachineScaleSetsClient) StartPreparer(resourceGroupName stri func (client VirtualMachineScaleSetsClient) StartSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -1452,6 +1471,7 @@ func (client VirtualMachineScaleSetsClient) UpdatePreparer(resourceGroupName str func (client VirtualMachineScaleSetsClient) UpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -1546,6 +1566,7 @@ func 
(client VirtualMachineScaleSetsClient) UpdateInstancesPreparer(resourceGrou func (client VirtualMachineScaleSetsClient) UpdateInstancesSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesetvms.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesetvms.go index 5a15edae9131..0be36c9ac63a 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesetvms.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesetvms.go @@ -107,6 +107,7 @@ func (client VirtualMachineScaleSetVMsClient) DeallocatePreparer(resourceGroupNa func (client VirtualMachineScaleSetVMsClient) DeallocateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -191,6 +192,7 @@ func (client VirtualMachineScaleSetVMsClient) DeletePreparer(resourceGroupName s func (client VirtualMachineScaleSetVMsClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -258,7 +260,9 @@ func (client VirtualMachineScaleSetVMsClient) GetPreparer(resourceGroupName stri // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetVMsClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -325,7 +329,9 @@ func (client VirtualMachineScaleSetVMsClient) GetInstanceViewPreparer(resourceGr // GetInstanceViewSender sends the GetInstanceView request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetVMsClient) GetInstanceViewSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetInstanceViewResponder handles the response to the GetInstanceView request. The method always @@ -401,7 +407,9 @@ func (client VirtualMachineScaleSetVMsClient) ListPreparer(resourceGroupName str // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineScaleSetVMsClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. 
The method always @@ -555,6 +563,7 @@ func (client VirtualMachineScaleSetVMsClient) PowerOffPreparer(resourceGroupName func (client VirtualMachineScaleSetVMsClient) PowerOffSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -639,6 +648,7 @@ func (client VirtualMachineScaleSetVMsClient) ReimagePreparer(resourceGroupName func (client VirtualMachineScaleSetVMsClient) ReimageSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -723,6 +733,7 @@ func (client VirtualMachineScaleSetVMsClient) ReimageAllPreparer(resourceGroupNa func (client VirtualMachineScaleSetVMsClient) ReimageAllSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -807,6 +818,7 @@ func (client VirtualMachineScaleSetVMsClient) RestartPreparer(resourceGroupName func (client VirtualMachineScaleSetVMsClient) RestartSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -890,6 +902,7 @@ func (client VirtualMachineScaleSetVMsClient) StartPreparer(resourceGroupName st func (client VirtualMachineScaleSetVMsClient) StartSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinesizes.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinesizes.go index dc2f2778bea2..0833578d5a6c 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinesizes.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinesizes.go @@ -93,7 +93,9 @@ func (client VirtualMachineSizesClient) ListPreparer(location string) (*http.Req // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client VirtualMachineSizesClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/client.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/client.go index b3eea6cb2f49..08485cae6140 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/client.go @@ -1,6 +1,6 @@ // Package containerregistry implements the Azure ARM Containerregistry service API version 2017-10-01. // -// +// Deprecated: Please instead use github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2017-10-01/containerregistry package containerregistry // Copyright (c) Microsoft and contributors. All rights reserved. 
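Both arm/compute and arm/containerregistry now carry deprecation headers pointing at the API-versioned services/ tree, so downstream code migrates by swapping the import path; a sketch, with the alias chosen for illustration:

import (
	// deprecated flat layout:
	//   "github.com/Azure/azure-sdk-for-go/arm/containerregistry"
	// replacement named in the deprecation notice:
	containerregistry "github.com/Azure/azure-sdk-for-go/services/containerregistry/mgmt/2017-10-01/containerregistry"
)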
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/operations.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/operations.go
index a9143e739664..5a5aa26f42c3 100755
--- a/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/operations.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/operations.go
@@ -79,7 +79,9 @@ func (client OperationsClient) ListPreparer() (*http.Request, error) {
 // ListSender sends the List request. The method will close the
 // http.Response Body if it receives an error.
 func (client OperationsClient) ListSender(req *http.Request) (*http.Response, error) {
-	return autorest.SendWithSender(client, req)
+	return autorest.SendWithSender(client,
+		req,
+		autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
 }

 // ListResponder handles the response to the List request. The method always
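OperationsClient.List is the one sender in this package that gets autorest.DoRetryForStatusCodes rather than azure.DoRetryWithRegistration: listing available operations is not tied to a subscription, so there is no resource provider to register, and plain status-code-based retry suffices. Standalone, the decorator looks like this (URL and retry budget are illustrative):

```go
package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	req, _ := http.NewRequest(http.MethodGet, "https://example.invalid/operations", nil)
	// Retry up to 3 times with a 5s backoff whenever the response status is
	// in autorest.StatusCodesForRetry (timeouts, throttling, 5xx responses).
	// The generated sender above passes client.RetryAttempts and
	// client.RetryDuration instead of literals.
	resp, err := autorest.SendWithSender(http.DefaultClient, req,
		autorest.DoRetryForStatusCodes(3, 5*time.Second, autorest.StatusCodesForRetry...))
	fmt.Println(resp, err)
}
```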
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/registries.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/registries.go
index c1c9af59e838..955b6ae1c439 100755
--- a/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/registries.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/registries.go
@@ -100,7 +100,9 @@ func (client RegistriesClient) CheckNameAvailabilityPreparer(registryNameCheckRe
 // CheckNameAvailabilitySender sends the CheckNameAvailability request. The method will close the
 // http.Response Body if it receives an error.
 func (client RegistriesClient) CheckNameAvailabilitySender(req *http.Request) (*http.Response, error) {
-	return autorest.SendWithSender(client, req)
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoRetryWithRegistration(client.Client))
 }

 // CheckNameAvailabilityResponder handles the response to the CheckNameAvailability request. The method always
@@ -202,6 +204,7 @@ func (client RegistriesClient) CreatePreparer(resourceGroupName string, registry
 func (client RegistriesClient) CreateSender(req *http.Request) (*http.Response, error) {
 	return autorest.SendWithSender(client,
 		req,
+		azure.DoRetryWithRegistration(client.Client),
 		azure.DoPollForAsynchronous(client.PollingDelay))
 }

@@ -295,6 +298,7 @@ func (client RegistriesClient) DeletePreparer(resourceGroupName string, registry
 func (client RegistriesClient) DeleteSender(req *http.Request) (*http.Response, error) {
 	return autorest.SendWithSender(client,
 		req,
+		azure.DoRetryWithRegistration(client.Client),
 		azure.DoPollForAsynchronous(client.PollingDelay))
 }

@@ -368,7 +372,9 @@ func (client RegistriesClient) GetPreparer(resourceGroupName string, registryNam
 // GetSender sends the Get request. The method will close the
 // http.Response Body if it receives an error.
 func (client RegistriesClient) GetSender(req *http.Request) (*http.Response, error) {
-	return autorest.SendWithSender(client, req)
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoRetryWithRegistration(client.Client))
 }

 // GetResponder handles the response to the Get request. The method always
@@ -429,7 +435,9 @@ func (client RegistriesClient) ListPreparer() (*http.Request, error) {
 // ListSender sends the List request. The method will close the
 // http.Response Body if it receives an error.
 func (client RegistriesClient) ListSender(req *http.Request) (*http.Response, error) {
-	return autorest.SendWithSender(client, req)
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoRetryWithRegistration(client.Client))
 }

 // ListResponder handles the response to the List request. The method always
@@ -562,7 +570,9 @@ func (client RegistriesClient) ListByResourceGroupPreparer(resourceGroupName str
 // ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the
 // http.Response Body if it receives an error.
 func (client RegistriesClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) {
-	return autorest.SendWithSender(client, req)
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoRetryWithRegistration(client.Client))
 }

 // ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always
@@ -705,7 +715,9 @@ func (client RegistriesClient) ListCredentialsPreparer(resourceGroupName string,
 // ListCredentialsSender sends the ListCredentials request. The method will close the
 // http.Response Body if it receives an error.
 func (client RegistriesClient) ListCredentialsSender(req *http.Request) (*http.Response, error) {
-	return autorest.SendWithSender(client, req)
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoRetryWithRegistration(client.Client))
 }

 // ListCredentialsResponder handles the response to the ListCredentials request. The method always
@@ -779,7 +791,9 @@ func (client RegistriesClient) ListUsagesPreparer(resourceGroupName string, regi
 // ListUsagesSender sends the ListUsages request. The method will close the
 // http.Response Body if it receives an error.
 func (client RegistriesClient) ListUsagesSender(req *http.Request) (*http.Response, error) {
-	return autorest.SendWithSender(client, req)
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoRetryWithRegistration(client.Client))
 }

 // ListUsagesResponder handles the response to the ListUsages request. The method always
@@ -856,7 +870,9 @@ func (client RegistriesClient) RegenerateCredentialPreparer(resourceGroupName st
 // RegenerateCredentialSender sends the RegenerateCredential request. The method will close the
 // http.Response Body if it receives an error.
 func (client RegistriesClient) RegenerateCredentialSender(req *http.Request) (*http.Response, error) {
-	return autorest.SendWithSender(client, req)
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoRetryWithRegistration(client.Client))
 }

 // RegenerateCredentialResponder handles the response to the RegenerateCredential request. The method always
@@ -952,6 +968,7 @@ func (client RegistriesClient) UpdatePreparer(resourceGroupName string, registry
 func (client RegistriesClient) UpdateSender(req *http.Request) (*http.Response, error) {
 	return autorest.SendWithSender(client,
 		req,
+		azure.DoRetryWithRegistration(client.Client),
 		azure.DoPollForAsynchronous(client.PollingDelay))
 }

diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/replications.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/replications.go
index 79fce2d21ee8..5b82038ef1d7 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/replications.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/replications.go
@@ -125,6 +125,7 @@ func (client ReplicationsClient) CreatePreparer(resourceGroupName string, regist
 func (client ReplicationsClient) CreateSender(req *http.Request) (*http.Response, error) {
 	return autorest.SendWithSender(client,
 		req,
+		azure.DoRetryWithRegistration(client.Client),
 		azure.DoPollForAsynchronous(client.PollingDelay))
 }

@@ -224,6 +225,7 @@ func (client ReplicationsClient) DeletePreparer(resourceGroupName string, regist
 func (client ReplicationsClient) DeleteSender(req *http.Request) (*http.Response, error) {
 	return autorest.SendWithSender(client,
 		req,
+		azure.DoRetryWithRegistration(client.Client),
 		azure.DoPollForAsynchronous(client.PollingDelay))
 }

@@ -302,7 +304,9 @@ func (client ReplicationsClient) GetPreparer(resourceGroupName string, registryN
 // GetSender sends the Get request. The method will close the
 // http.Response Body if it receives an error.
 func (client ReplicationsClient) GetSender(req *http.Request) (*http.Response, error) {
-	return autorest.SendWithSender(client, req)
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoRetryWithRegistration(client.Client))
 }

 // GetResponder handles the response to the Get request. The method always
@@ -376,7 +380,9 @@ func (client ReplicationsClient) ListPreparer(resourceGroupName string, registry
 // ListSender sends the List request. The method will close the
 // http.Response Body if it receives an error.
 func (client ReplicationsClient) ListSender(req *http.Request) (*http.Response, error) {
-	return autorest.SendWithSender(client, req)
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoRetryWithRegistration(client.Client))
 }

 // ListResponder handles the response to the List request. The method always
@@ -547,6 +553,7 @@ func (client ReplicationsClient) UpdatePreparer(resourceGroupName string, regist
 func (client ReplicationsClient) UpdateSender(req *http.Request) (*http.Response, error) {
 	return autorest.SendWithSender(client,
 		req,
+		azure.DoRetryWithRegistration(client.Client),
 		azure.DoPollForAsynchronous(client.PollingDelay))
 }

diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/version.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/version.go
index 6e3ee863831d..57af0d04a4de 100755
--- a/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/version.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/version.go
@@ -19,10 +19,10 @@ package containerregistry

 // UserAgent returns the UserAgent string to use when sending http.Requests.
 func UserAgent() string {
-	return "Azure-SDK-For-Go/v11.1.0-beta arm-containerregistry/2017-10-01"
+	return "Azure-SDK-For-Go/v12.4.0-beta arm-containerregistry/2017-10-01"
 }

 // Version returns the semantic version (see http://semver.org) of the client.
 func Version() string {
-	return "v11.1.0-beta"
+	return "v12.4.0-beta"
 }
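For the subscription-scoped calls, azure.DoRetryWithRegistration addresses a common first-use failure: ARM rejects requests with 409 MissingSubscriptionRegistration until the resource provider (here, Microsoft.ContainerRegistry) has been registered on the subscription. The decorator detects that error, registers the provider, and resends the request, retrying up to client.RetryAttempts. A hedged usage sketch (token acquisition elided; the constructor and List signature follow the generated pattern rather than anything shown in this diff):

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/arm/containerregistry"
	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/adal"
)

func listRegistries(spt *adal.ServicePrincipalToken, subscriptionID string) {
	client := containerregistry.NewRegistriesClient(subscriptionID)
	client.Authorizer = autorest.NewBearerAuthorizer(spt)

	// List ends up in ListSender above; with this bump the first call on a
	// fresh subscription auto-registers Microsoft.ContainerRegistry and
	// retries instead of surfacing the 409 to the caller.
	result, err := client.List()
	fmt.Println(result, err)
}
```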
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/webhooks.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/webhooks.go
index ff5f9ce7844d..e61ce83a9739 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/webhooks.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry/webhooks.go
@@ -131,6 +131,7 @@ func (client WebhooksClient) CreatePreparer(resourceGroupName string, registryNa
 func (client WebhooksClient) CreateSender(req *http.Request) (*http.Response, error) {
 	return autorest.SendWithSender(client,
 		req,
+		azure.DoRetryWithRegistration(client.Client),
 		azure.DoPollForAsynchronous(client.PollingDelay))
 }

@@ -229,6 +230,7 @@ func (client WebhooksClient) DeletePreparer(resourceGroupName string, registryNa
 func (client WebhooksClient) DeleteSender(req *http.Request) (*http.Response, error) {
 	return autorest.SendWithSender(client,
 		req,
+		azure.DoRetryWithRegistration(client.Client),
 		azure.DoPollForAsynchronous(client.PollingDelay))
 }

@@ -307,7 +309,9 @@ func (client WebhooksClient) GetPreparer(resourceGroupName string, registryName
 // GetSender sends the Get request. The method will close the
 // http.Response Body if it receives an error.
 func (client WebhooksClient) GetSender(req *http.Request) (*http.Response, error) {
-	return autorest.SendWithSender(client, req)
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoRetryWithRegistration(client.Client))
 }

 // GetResponder handles the response to the Get request. The method always
@@ -386,7 +390,9 @@ func (client WebhooksClient) GetCallbackConfigPreparer(resourceGroupName string,
 // GetCallbackConfigSender sends the GetCallbackConfig request. The method will close the
 // http.Response Body if it receives an error.
 func (client WebhooksClient) GetCallbackConfigSender(req *http.Request) (*http.Response, error) {
-	return autorest.SendWithSender(client, req)
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoRetryWithRegistration(client.Client))
 }

 // GetCallbackConfigResponder handles the response to the GetCallbackConfig request. The method always
@@ -460,7 +466,9 @@ func (client WebhooksClient) ListPreparer(resourceGroupName string, registryName
 // ListSender sends the List request. The method will close the
 // http.Response Body if it receives an error.
 func (client WebhooksClient) ListSender(req *http.Request) (*http.Response, error) {
-	return autorest.SendWithSender(client, req)
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoRetryWithRegistration(client.Client))
 }

 // ListResponder handles the response to the List request. The method always
@@ -608,7 +616,9 @@ func (client WebhooksClient) ListEventsPreparer(resourceGroupName string, regist
 // ListEventsSender sends the ListEvents request. The method will close the
 // http.Response Body if it receives an error.
 func (client WebhooksClient) ListEventsSender(req *http.Request) (*http.Response, error) {
-	return autorest.SendWithSender(client, req)
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoRetryWithRegistration(client.Client))
 }

 // ListEventsResponder handles the response to the ListEvents request. The method always
@@ -756,7 +766,9 @@ func (client WebhooksClient) PingPreparer(resourceGroupName string, registryName
 // PingSender sends the Ping request. The method will close the
 // http.Response Body if it receives an error.
 func (client WebhooksClient) PingSender(req *http.Request) (*http.Response, error) {
-	return autorest.SendWithSender(client, req)
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoRetryWithRegistration(client.Client))
 }

 // PingResponder handles the response to the Ping request. The method always
@@ -858,6 +870,7 @@ func (client WebhooksClient) UpdatePreparer(resourceGroupName string, registryNa
 func (client WebhooksClient) UpdateSender(req *http.Request) (*http.Response, error) {
 	return autorest.SendWithSender(client,
 		req,
+		azure.DoRetryWithRegistration(client.Client),
 		azure.DoPollForAsynchronous(client.PollingDelay))
 }

diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/disk/client.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/disk/client.go
index 8bab7acc1324..54634794d677 100755
--- a/vendor/github.com/Azure/azure-sdk-for-go/arm/disk/client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/disk/client.go
@@ -2,6 +2,8 @@
 // 2016-04-30-preview.
 //
 // The Disk Resource Provider Client.
+//
+// Deprecated: Please instead use github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2016-04-30-preview/compute
 package disk

 // Copyright (c) Microsoft and contributors. All rights reserved.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/disk/version.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/disk/version.go
index 11c4a35ee9c3..2981a37ce5bb 100755
--- a/vendor/github.com/Azure/azure-sdk-for-go/arm/disk/version.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/disk/version.go
@@ -19,10 +19,10 @@ package disk

 // UserAgent returns the UserAgent string to use when sending http.Requests.
 func UserAgent() string {
-	return "Azure-SDK-For-Go/v10.2.0-beta arm-disk/2016-04-30-preview"
+	return "Azure-SDK-For-Go/v12.4.0-beta arm-disk/2016-04-30-preview"
 }

 // Version returns the semantic version (see http://semver.org) of the client.
 func Version() string {
-	return "v10.2.0-beta"
+	return "v12.4.0-beta"
 }
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/applicationgateways.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/applicationgateways.go
index c000be117bcd..0d1605ac2975 100755
--- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/applicationgateways.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/applicationgateways.go
@@ -109,6 +109,7 @@ func (client ApplicationGatewaysClient) BackendHealthPreparer(resourceGroupName
 func (client ApplicationGatewaysClient) BackendHealthSender(req *http.Request) (*http.Response, error) {
 	return autorest.SendWithSender(client,
 		req,
+		azure.DoRetryWithRegistration(client.Client),
 		azure.DoPollForAsynchronous(client.PollingDelay))
 }

@@ -209,6 +210,7 @@ func (client ApplicationGatewaysClient) CreateOrUpdatePreparer(resourceGroupName
 func (client ApplicationGatewaysClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
 	return autorest.SendWithSender(client,
 		req,
+		azure.DoRetryWithRegistration(client.Client),
 		azure.DoPollForAsynchronous(client.PollingDelay))
 }

@@ -290,6 +292,7 @@ func (client ApplicationGatewaysClient) DeletePreparer(resourceGroupName string,
 func (client ApplicationGatewaysClient) DeleteSender(req *http.Request) (*http.Response, error) {
 	return autorest.SendWithSender(client,
 		req,
+		azure.DoRetryWithRegistration(client.Client),
 		azure.DoPollForAsynchronous(client.PollingDelay))
 }

@@ -354,7 +357,9 @@ func (client ApplicationGatewaysClient) GetPreparer(resourceGroupName string, ap
 // GetSender sends the Get request. The method will close the
 // http.Response Body if it receives an error.
 func (client ApplicationGatewaysClient) GetSender(req *http.Request) (*http.Response, error) {
-	return autorest.SendWithSender(client, req)
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoRetryWithRegistration(client.Client))
 }

 // GetResponder handles the response to the Get request. The method always
@@ -418,7 +423,9 @@ func (client ApplicationGatewaysClient) GetSslPredefinedPolicyPreparer(predefine
 // GetSslPredefinedPolicySender sends the GetSslPredefinedPolicy request. The method will close the
 // http.Response Body if it receives an error.
 func (client ApplicationGatewaysClient) GetSslPredefinedPolicySender(req *http.Request) (*http.Response, error) {
-	return autorest.SendWithSender(client, req)
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoRetryWithRegistration(client.Client))
 }

 // GetSslPredefinedPolicyResponder handles the response to the GetSslPredefinedPolicy request. The method always
@@ -482,7 +489,9 @@ func (client ApplicationGatewaysClient) ListPreparer(resourceGroupName string) (
 // ListSender sends the List request. The method will close the
 // http.Response Body if it receives an error.
 func (client ApplicationGatewaysClient) ListSender(req *http.Request) (*http.Response, error) {
-	return autorest.SendWithSender(client, req)
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoRetryWithRegistration(client.Client))
 }

 // ListResponder handles the response to the List request. The method always
@@ -612,7 +621,9 @@ func (client ApplicationGatewaysClient) ListAllPreparer() (*http.Request, error)
 // ListAllSender sends the ListAll request. The method will close the
 // http.Response Body if it receives an error.
 func (client ApplicationGatewaysClient) ListAllSender(req *http.Request) (*http.Response, error) {
-	return autorest.SendWithSender(client, req)
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoRetryWithRegistration(client.Client))
 }

 // ListAllResponder handles the response to the ListAll request. The method always
@@ -742,7 +753,9 @@ func (client ApplicationGatewaysClient) ListAvailableSslOptionsPreparer() (*http
 // ListAvailableSslOptionsSender sends the ListAvailableSslOptions request. The method will close the
 // http.Response Body if it receives an error.
 func (client ApplicationGatewaysClient) ListAvailableSslOptionsSender(req *http.Request) (*http.Response, error) {
-	return autorest.SendWithSender(client, req)
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoRetryWithRegistration(client.Client))
 }

 // ListAvailableSslOptionsResponder handles the response to the ListAvailableSslOptions request. The method always
@@ -803,7 +816,9 @@ func (client ApplicationGatewaysClient) ListAvailableSslPredefinedPoliciesPrepar
 // ListAvailableSslPredefinedPoliciesSender sends the ListAvailableSslPredefinedPolicies request. The method will close the
 // http.Response Body if it receives an error.
 func (client ApplicationGatewaysClient) ListAvailableSslPredefinedPoliciesSender(req *http.Request) (*http.Response, error) {
-	return autorest.SendWithSender(client, req)
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoRetryWithRegistration(client.Client))
 }

 // ListAvailableSslPredefinedPoliciesResponder handles the response to the ListAvailableSslPredefinedPolicies request. The method always
@@ -933,7 +948,9 @@ func (client ApplicationGatewaysClient) ListAvailableWafRuleSetsPreparer() (*htt
 // ListAvailableWafRuleSetsSender sends the ListAvailableWafRuleSets request. The method will close the
 // http.Response Body if it receives an error.
 func (client ApplicationGatewaysClient) ListAvailableWafRuleSetsSender(req *http.Request) (*http.Response, error) {
-	return autorest.SendWithSender(client, req)
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoRetryWithRegistration(client.Client))
 }

 // ListAvailableWafRuleSetsResponder handles the response to the ListAvailableWafRuleSets request. The method always
@@ -1014,6 +1031,7 @@ func (client ApplicationGatewaysClient) StartPreparer(resourceGroupName string,
 func (client ApplicationGatewaysClient) StartSender(req *http.Request) (*http.Response, error) {
 	return autorest.SendWithSender(client,
 		req,
+		azure.DoRetryWithRegistration(client.Client),
 		azure.DoPollForAsynchronous(client.PollingDelay))
 }

@@ -1095,6 +1113,7 @@ func (client ApplicationGatewaysClient) StopPreparer(resourceGroupName string, a
 func (client ApplicationGatewaysClient) StopSender(req *http.Request) (*http.Response, error) {
 	return autorest.SendWithSender(client,
 		req,
+		azure.DoRetryWithRegistration(client.Client),
 		azure.DoPollForAsynchronous(client.PollingDelay))
 }

diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/applicationsecuritygroups.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/applicationsecuritygroups.go
index 1e84e706466f..954b113d4228 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/applicationsecuritygroups.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/applicationsecuritygroups.go
@@ -107,6 +107,7 @@ func (client ApplicationSecurityGroupsClient) CreateOrUpdatePreparer(resourceGro
 func (client ApplicationSecurityGroupsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
 	return autorest.SendWithSender(client,
 		req,
+		azure.DoRetryWithRegistration(client.Client),
 		azure.DoPollForAsynchronous(client.PollingDelay))
 }

@@ -190,6 +191,7 @@ func (client ApplicationSecurityGroupsClient) DeletePreparer(resourceGroupName s
 func (client ApplicationSecurityGroupsClient) DeleteSender(req *http.Request) (*http.Response, error) {
 	return autorest.SendWithSender(client,
 		req,
+		azure.DoRetryWithRegistration(client.Client),
 		azure.DoPollForAsynchronous(client.PollingDelay))
 }

@@ -255,7 +257,9 @@ func (client ApplicationSecurityGroupsClient) GetPreparer(resourceGroupName stri
 // GetSender sends the Get request. The method will close the
 // http.Response Body if it receives an error.
 func (client ApplicationSecurityGroupsClient) GetSender(req *http.Request) (*http.Response, error) {
-	return autorest.SendWithSender(client, req)
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoRetryWithRegistration(client.Client))
 }

 // GetResponder handles the response to the Get request. The method always
@@ -319,7 +323,9 @@ func (client ApplicationSecurityGroupsClient) ListPreparer(resourceGroupName str
 // ListSender sends the List request. The method will close the
 // http.Response Body if it receives an error.
 func (client ApplicationSecurityGroupsClient) ListSender(req *http.Request) (*http.Response, error) {
-	return autorest.SendWithSender(client, req)
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoRetryWithRegistration(client.Client))
 }

 // ListResponder handles the response to the List request. The method always
@@ -449,7 +455,9 @@ func (client ApplicationSecurityGroupsClient) ListAllPreparer() (*http.Request,
 // ListAllSender sends the ListAll request. The method will close the
 // http.Response Body if it receives an error.
 func (client ApplicationSecurityGroupsClient) ListAllSender(req *http.Request) (*http.Response, error) {
-	return autorest.SendWithSender(client, req)
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoRetryWithRegistration(client.Client))
 }

 // ListAllResponder handles the response to the ListAll request. The method always
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/availableendpointservices.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/availableendpointservices.go
index 26bcaf8b647c..a010418e9ded 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/availableendpointservices.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/availableendpointservices.go
@@ -86,7 +86,9 @@ func (client AvailableEndpointServicesClient) ListPreparer(location string) (*ht
 // ListSender sends the List request. The method will close the
 // http.Response Body if it receives an error.
 func (client AvailableEndpointServicesClient) ListSender(req *http.Request) (*http.Response, error) {
-	return autorest.SendWithSender(client, req)
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoRetryWithRegistration(client.Client))
 }

 // ListResponder handles the response to the List request. The method always
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/bgpservicecommunities.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/bgpservicecommunities.go
index 10cfa2cdd558..5c4f4786a90e 100755
--- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/bgpservicecommunities.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/bgpservicecommunities.go
@@ -83,7 +83,9 @@ func (client BgpServiceCommunitiesClient) ListPreparer() (*http.Request, error)
 // ListSender sends the List request. The method will close the
 // http.Response Body if it receives an error.
 func (client BgpServiceCommunitiesClient) ListSender(req *http.Request) (*http.Response, error) {
-	return autorest.SendWithSender(client, req)
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoRetryWithRegistration(client.Client))
 }

 // ListResponder handles the response to the List request. The method always
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/client.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/client.go
index a3576c393aa9..ea0952c7ca9b 100755
--- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/client.go
@@ -1,6 +1,8 @@
 // Package network implements the Azure ARM Network service API version .
 //
 // Network Client
+//
+// Deprecated: Please instead use github.com/Azure/azure-sdk-for-go/services/network/mgmt/2017-09-01/network
 package network

 // Copyright (c) Microsoft and contributors. All rights reserved.
@@ -102,7 +104,9 @@ func (client ManagementClient) CheckDNSNameAvailabilityPreparer(location string,
 // CheckDNSNameAvailabilitySender sends the CheckDNSNameAvailability request. The method will close the
 // http.Response Body if it receives an error.
 func (client ManagementClient) CheckDNSNameAvailabilitySender(req *http.Request) (*http.Response, error) {
-	return autorest.SendWithSender(client, req)
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoRetryWithRegistration(client.Client))
 }

 // CheckDNSNameAvailabilityResponder handles the response to the CheckDNSNameAvailability request. The method always
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/defaultsecurityrules.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/defaultsecurityrules.go
index 9a3ded19d5c2..1fd3e18d92aa 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/defaultsecurityrules.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/defaultsecurityrules.go
@@ -89,7 +89,9 @@ func (client DefaultSecurityRulesClient) GetPreparer(resourceGroupName string, n
 // GetSender sends the Get request. The method will close the
 // http.Response Body if it receives an error.
 func (client DefaultSecurityRulesClient) GetSender(req *http.Request) (*http.Response, error) {
-	return autorest.SendWithSender(client, req)
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoRetryWithRegistration(client.Client))
 }

 // GetResponder handles the response to the Get request. The method always
@@ -155,7 +157,9 @@ func (client DefaultSecurityRulesClient) ListPreparer(resourceGroupName string,
 // ListSender sends the List request. The method will close the
 // http.Response Body if it receives an error.
 func (client DefaultSecurityRulesClient) ListSender(req *http.Request) (*http.Response, error) {
-	return autorest.SendWithSender(client, req)
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoRetryWithRegistration(client.Client))
 }

 // ListResponder handles the response to the List request. The method always
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitauthorizations.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitauthorizations.go
index 9b7fd524e73c..3ea822813dc1 100755
--- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitauthorizations.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitauthorizations.go
@@ -111,6 +111,7 @@ func (client ExpressRouteCircuitAuthorizationsClient) CreateOrUpdatePreparer(res
 func (client ExpressRouteCircuitAuthorizationsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
 	return autorest.SendWithSender(client,
 		req,
+		azure.DoRetryWithRegistration(client.Client),
 		azure.DoPollForAsynchronous(client.PollingDelay))
 }

@@ -195,6 +196,7 @@ func (client ExpressRouteCircuitAuthorizationsClient) DeletePreparer(resourceGro
 func (client ExpressRouteCircuitAuthorizationsClient) DeleteSender(req *http.Request) (*http.Response, error) {
 	return autorest.SendWithSender(client,
 		req,
+		azure.DoRetryWithRegistration(client.Client),
 		azure.DoPollForAsynchronous(client.PollingDelay))
 }

@@ -261,7 +263,9 @@ func (client ExpressRouteCircuitAuthorizationsClient) GetPreparer(resourceGroupN
 // GetSender sends the Get request. The method will close the
 // http.Response Body if it receives an error.
 func (client ExpressRouteCircuitAuthorizationsClient) GetSender(req *http.Request) (*http.Response, error) {
-	return autorest.SendWithSender(client, req)
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoRetryWithRegistration(client.Client))
 }

 // GetResponder handles the response to the Get request. The method always
@@ -326,7 +330,9 @@ func (client ExpressRouteCircuitAuthorizationsClient) ListPreparer(resourceGroup
 // ListSender sends the List request. The method will close the
 // http.Response Body if it receives an error.
 func (client ExpressRouteCircuitAuthorizationsClient) ListSender(req *http.Request) (*http.Response, error) {
-	return autorest.SendWithSender(client, req)
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoRetryWithRegistration(client.Client))
 }

 // ListResponder handles the response to the List request. The method always
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitpeerings.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitpeerings.go
index af9fc67e0394..c454167020dc 100755
--- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitpeerings.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitpeerings.go
@@ -109,6 +109,7 @@ func (client ExpressRouteCircuitPeeringsClient) CreateOrUpdatePreparer(resourceG
 func (client ExpressRouteCircuitPeeringsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
 	return autorest.SendWithSender(client,
 		req,
+		azure.DoRetryWithRegistration(client.Client),
 		azure.DoPollForAsynchronous(client.PollingDelay))
 }

@@ -193,6 +194,7 @@ func (client ExpressRouteCircuitPeeringsClient) DeletePreparer(resourceGroupName
 func (client ExpressRouteCircuitPeeringsClient) DeleteSender(req *http.Request) (*http.Response, error) {
 	return autorest.SendWithSender(client,
 		req,
+		azure.DoRetryWithRegistration(client.Client),
 		azure.DoPollForAsynchronous(client.PollingDelay))
 }

@@ -259,7 +261,9 @@ func (client ExpressRouteCircuitPeeringsClient) GetPreparer(resourceGroupName st
 // GetSender sends the Get request. The method will close the
 // http.Response Body if it receives an error.
 func (client ExpressRouteCircuitPeeringsClient) GetSender(req *http.Request) (*http.Response, error) {
-	return autorest.SendWithSender(client, req)
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoRetryWithRegistration(client.Client))
 }

 // GetResponder handles the response to the Get request. The method always
@@ -324,7 +328,9 @@ func (client ExpressRouteCircuitPeeringsClient) ListPreparer(resourceGroupName s
 // ListSender sends the List request. The method will close the
 // http.Response Body if it receives an error.
 func (client ExpressRouteCircuitPeeringsClient) ListSender(req *http.Request) (*http.Response, error) {
-	return autorest.SendWithSender(client, req)
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoRetryWithRegistration(client.Client))
 }

 // ListResponder handles the response to the List request. The method always
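Note how the long-running operations (CreateOrUpdate, Delete) stack both decorators: SendWithSender applies decorators in the order listed, so azure.DoPollForAsynchronous ends up as the outer wrapper and drives polling through a sender that already knows how to recover from an unregistered provider. Schematically, with illustrative names (in the generated code the receiver embeds autorest.Client, which is why it can pass itself as the Sender and client.Client as the retry target):

```go
package senders

import (
	"net/http"

	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/azure"
)

// sendAsync mirrors the generated *Sender methods for long-running calls:
// registration retry wraps the raw client, and polling wraps that, so the
// initial request benefits from auto-registration before polling begins.
func sendAsync(client autorest.Client, req *http.Request) (*http.Response, error) {
	return autorest.SendWithSender(client,
		req,
		azure.DoRetryWithRegistration(client),
		azure.DoPollForAsynchronous(client.PollingDelay))
}
```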
The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuits.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuits.go index 63cfeff9e0a1..8a497d166afc 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuits.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuits.go @@ -107,6 +107,7 @@ func (client ExpressRouteCircuitsClient) CreateOrUpdatePreparer(resourceGroupNam func (client ExpressRouteCircuitsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -188,6 +189,7 @@ func (client ExpressRouteCircuitsClient) DeletePreparer(resourceGroupName string func (client ExpressRouteCircuitsClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -252,7 +254,9 @@ func (client ExpressRouteCircuitsClient) GetPreparer(resourceGroupName string, c // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client ExpressRouteCircuitsClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -319,7 +323,9 @@ func (client ExpressRouteCircuitsClient) GetPeeringStatsPreparer(resourceGroupNa // GetPeeringStatsSender sends the GetPeeringStats request. The method will close the // http.Response Body if it receives an error. func (client ExpressRouteCircuitsClient) GetPeeringStatsSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetPeeringStatsResponder handles the response to the GetPeeringStats request. The method always @@ -384,7 +390,9 @@ func (client ExpressRouteCircuitsClient) GetStatsPreparer(resourceGroupName stri // GetStatsSender sends the GetStats request. The method will close the // http.Response Body if it receives an error. func (client ExpressRouteCircuitsClient) GetStatsSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetStatsResponder handles the response to the GetStats request. The method always @@ -448,7 +456,9 @@ func (client ExpressRouteCircuitsClient) ListPreparer(resourceGroupName string) // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client ExpressRouteCircuitsClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always @@ -578,7 +588,9 @@ func (client ExpressRouteCircuitsClient) ListAllPreparer() (*http.Request, error // ListAllSender sends the ListAll request. The method will close the // http.Response Body if it receives an error. 
func (client ExpressRouteCircuitsClient) ListAllSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListAllResponder handles the response to the ListAll request. The method always @@ -732,6 +744,7 @@ func (client ExpressRouteCircuitsClient) ListArpTablePreparer(resourceGroupName func (client ExpressRouteCircuitsClient) ListArpTableSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -817,6 +830,7 @@ func (client ExpressRouteCircuitsClient) ListRoutesTablePreparer(resourceGroupNa func (client ExpressRouteCircuitsClient) ListRoutesTableSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -902,6 +916,7 @@ func (client ExpressRouteCircuitsClient) ListRoutesTableSummaryPreparer(resource func (client ExpressRouteCircuitsClient) ListRoutesTableSummarySender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressrouteserviceproviders.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressrouteserviceproviders.go index 5e39087b2a5c..ccd76e9968d4 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressrouteserviceproviders.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressrouteserviceproviders.go @@ -84,7 +84,9 @@ func (client ExpressRouteServiceProvidersClient) ListPreparer() (*http.Request, // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client ExpressRouteServiceProvidersClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/inboundnatrules.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/inboundnatrules.go index eab80ac57669..8a43aee1b914 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/inboundnatrules.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/inboundnatrules.go @@ -132,6 +132,7 @@ func (client InboundNatRulesClient) CreateOrUpdatePreparer(resourceGroupName str func (client InboundNatRulesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -216,6 +217,7 @@ func (client InboundNatRulesClient) DeletePreparer(resourceGroupName string, loa func (client InboundNatRulesClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -285,7 +287,9 @@ func (client InboundNatRulesClient) GetPreparer(resourceGroupName string, loadBa // GetSender sends the Get request. 
The method will close the // http.Response Body if it receives an error. func (client InboundNatRulesClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -350,7 +354,9 @@ func (client InboundNatRulesClient) ListPreparer(resourceGroupName string, loadB // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client InboundNatRulesClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/interfaceipconfigurations.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/interfaceipconfigurations.go index ca0359f09006..4885110b27f9 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/interfaceipconfigurations.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/interfaceipconfigurations.go @@ -89,7 +89,9 @@ func (client InterfaceIPConfigurationsClient) GetPreparer(resourceGroupName stri // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client InterfaceIPConfigurationsClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -154,7 +156,9 @@ func (client InterfaceIPConfigurationsClient) ListPreparer(resourceGroupName str // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client InterfaceIPConfigurationsClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/interfaceloadbalancers.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/interfaceloadbalancers.go index c7b7d272f729..e9f00e491e50 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/interfaceloadbalancers.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/interfaceloadbalancers.go @@ -87,7 +87,9 @@ func (client InterfaceLoadBalancersClient) ListPreparer(resourceGroupName string // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client InterfaceLoadBalancersClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. 
The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/interfaces.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/interfaces.go index c2fdb3e901b7..cfe3238c53aa 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/interfaces.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/interfaces.go @@ -107,6 +107,7 @@ func (client InterfacesClient) CreateOrUpdatePreparer(resourceGroupName string, func (client InterfacesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -188,6 +189,7 @@ func (client InterfacesClient) DeletePreparer(resourceGroupName string, networkI func (client InterfacesClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -256,7 +258,9 @@ func (client InterfacesClient) GetPreparer(resourceGroupName string, networkInte // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client InterfacesClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -338,6 +342,7 @@ func (client InterfacesClient) GetEffectiveRouteTablePreparer(resourceGroupName func (client InterfacesClient) GetEffectiveRouteTableSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -410,7 +415,9 @@ func (client InterfacesClient) GetVirtualMachineScaleSetNetworkInterfacePreparer // GetVirtualMachineScaleSetNetworkInterfaceSender sends the GetVirtualMachineScaleSetNetworkInterface request. The method will close the // http.Response Body if it receives an error. func (client InterfacesClient) GetVirtualMachineScaleSetNetworkInterfaceSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetVirtualMachineScaleSetNetworkInterfaceResponder handles the response to the GetVirtualMachineScaleSetNetworkInterface request. The method always @@ -474,7 +481,9 @@ func (client InterfacesClient) ListPreparer(resourceGroupName string) (*http.Req // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client InterfacesClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always @@ -604,7 +613,9 @@ func (client InterfacesClient) ListAllPreparer() (*http.Request, error) { // ListAllSender sends the ListAll request. The method will close the // http.Response Body if it receives an error. 
func (client InterfacesClient) ListAllSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListAllResponder handles the response to the ListAll request. The method always @@ -755,6 +766,7 @@ func (client InterfacesClient) ListEffectiveNetworkSecurityGroupsPreparer(resour func (client InterfacesClient) ListEffectiveNetworkSecurityGroupsSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -821,7 +833,9 @@ func (client InterfacesClient) ListVirtualMachineScaleSetNetworkInterfacesPrepar // ListVirtualMachineScaleSetNetworkInterfacesSender sends the ListVirtualMachineScaleSetNetworkInterfaces request. The method will close the // http.Response Body if it receives an error. func (client InterfacesClient) ListVirtualMachineScaleSetNetworkInterfacesSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListVirtualMachineScaleSetNetworkInterfacesResponder handles the response to the ListVirtualMachineScaleSetNetworkInterfaces request. The method always @@ -958,7 +972,9 @@ func (client InterfacesClient) ListVirtualMachineScaleSetVMNetworkInterfacesPrep // ListVirtualMachineScaleSetVMNetworkInterfacesSender sends the ListVirtualMachineScaleSetVMNetworkInterfaces request. The method will close the // http.Response Body if it receives an error. func (client InterfacesClient) ListVirtualMachineScaleSetVMNetworkInterfacesSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListVirtualMachineScaleSetVMNetworkInterfacesResponder handles the response to the ListVirtualMachineScaleSetVMNetworkInterfaces request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancerbackendaddresspools.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancerbackendaddresspools.go index de14c3556084..e15b18fc5835 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancerbackendaddresspools.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancerbackendaddresspools.go @@ -90,7 +90,9 @@ func (client LoadBalancerBackendAddressPoolsClient) GetPreparer(resourceGroupNam // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client LoadBalancerBackendAddressPoolsClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -155,7 +157,9 @@ func (client LoadBalancerBackendAddressPoolsClient) ListPreparer(resourceGroupNa // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. 
func (client LoadBalancerBackendAddressPoolsClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancerfrontendipconfigurations.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancerfrontendipconfigurations.go index 515b875a1831..7ba6f72fb532 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancerfrontendipconfigurations.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancerfrontendipconfigurations.go @@ -91,7 +91,9 @@ func (client LoadBalancerFrontendIPConfigurationsClient) GetPreparer(resourceGro // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client LoadBalancerFrontendIPConfigurationsClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -156,7 +158,9 @@ func (client LoadBalancerFrontendIPConfigurationsClient) ListPreparer(resourceGr // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client LoadBalancerFrontendIPConfigurationsClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancerloadbalancingrules.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancerloadbalancingrules.go index 2ecd6be0d4f0..4a4747ccda11 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancerloadbalancingrules.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancerloadbalancingrules.go @@ -90,7 +90,9 @@ func (client LoadBalancerLoadBalancingRulesClient) GetPreparer(resourceGroupName // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client LoadBalancerLoadBalancingRulesClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -155,7 +157,9 @@ func (client LoadBalancerLoadBalancingRulesClient) ListPreparer(resourceGroupNam // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client LoadBalancerLoadBalancingRulesClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. 
The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancernetworkinterfaces.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancernetworkinterfaces.go index 35650354255b..def71135275e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancernetworkinterfaces.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancernetworkinterfaces.go @@ -88,7 +88,9 @@ func (client LoadBalancerNetworkInterfacesClient) ListPreparer(resourceGroupName // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client LoadBalancerNetworkInterfacesClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancerprobes.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancerprobes.go index daee51e0f355..2b43d041b026 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancerprobes.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancerprobes.go @@ -89,7 +89,9 @@ func (client LoadBalancerProbesClient) GetPreparer(resourceGroupName string, loa // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client LoadBalancerProbesClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -154,7 +156,9 @@ func (client LoadBalancerProbesClient) ListPreparer(resourceGroupName string, lo // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client LoadBalancerProbesClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancers.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancers.go index 3a2528d85f4b..980478937e8d 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancers.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancers.go @@ -106,6 +106,7 @@ func (client LoadBalancersClient) CreateOrUpdatePreparer(resourceGroupName strin func (client LoadBalancersClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -187,6 +188,7 @@ func (client LoadBalancersClient) DeletePreparer(resourceGroupName string, loadB func (client LoadBalancersClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -255,7 +257,9 @@ func (client LoadBalancersClient) GetPreparer(resourceGroupName string, loadBala // GetSender sends the Get request. 
The method will close the // http.Response Body if it receives an error. func (client LoadBalancersClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -319,7 +323,9 @@ func (client LoadBalancersClient) ListPreparer(resourceGroupName string) (*http. // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client LoadBalancersClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always @@ -449,7 +455,9 @@ func (client LoadBalancersClient) ListAllPreparer() (*http.Request, error) { // ListAllSender sends the ListAll request. The method will close the // http.Response Body if it receives an error. func (client LoadBalancersClient) ListAllSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListAllResponder handles the response to the ListAll request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/localnetworkgateways.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/localnetworkgateways.go index 64cdea20a72b..5431d244efe9 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/localnetworkgateways.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/localnetworkgateways.go @@ -119,6 +119,7 @@ func (client LocalNetworkGatewaysClient) CreateOrUpdatePreparer(resourceGroupNam func (client LocalNetworkGatewaysClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -210,6 +211,7 @@ func (client LocalNetworkGatewaysClient) DeletePreparer(resourceGroupName string func (client LocalNetworkGatewaysClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -281,7 +283,9 @@ func (client LocalNetworkGatewaysClient) GetPreparer(resourceGroupName string, l // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client LocalNetworkGatewaysClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -345,7 +349,9 @@ func (client LocalNetworkGatewaysClient) ListPreparer(resourceGroupName string) // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client LocalNetworkGatewaysClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. 
The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/packetcaptures.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/packetcaptures.go index 1ad97a5e2aa1..d089ca61dbb6 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/packetcaptures.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/packetcaptures.go @@ -122,6 +122,7 @@ func (client PacketCapturesClient) CreatePreparer(resourceGroupName string, netw func (client PacketCapturesClient) CreateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -205,6 +206,7 @@ func (client PacketCapturesClient) DeletePreparer(resourceGroupName string, netw func (client PacketCapturesClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -271,7 +273,9 @@ func (client PacketCapturesClient) GetPreparer(resourceGroupName string, network // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client PacketCapturesClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -355,6 +359,7 @@ func (client PacketCapturesClient) GetStatusPreparer(resourceGroupName string, n func (client PacketCapturesClient) GetStatusSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -420,7 +425,9 @@ func (client PacketCapturesClient) ListPreparer(resourceGroupName string, networ // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client PacketCapturesClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. 
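For the long-running operations in these files (Create, Delete, Stop, GetStatus), the senders already carried `azure.DoPollForAsynchronous`; the change threads the registration retry in ahead of it, so provider registration is handled on the initial request before polling starts. A sketch of the composed chain, mirroring the regenerated async senders rather than introducing new API:

```go
package example

import (
	"net/http"

	"github.com/Azure/azure-sdk-for-go/arm/network"
	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/azure"
)

// sendAsync mirrors the regenerated async *Sender methods. Decorators are
// applied in order: the registration retry guards the initial request, then
// DoPollForAsynchronous follows the operation's polling headers, pacing
// each poll by client.PollingDelay, until the operation completes.
func sendAsync(client network.PacketCapturesClient, req *http.Request) (*http.Response, error) {
	return autorest.SendWithSender(client, req,
		azure.DoRetryWithRegistration(client.Client),
		azure.DoPollForAsynchronous(client.PollingDelay))
}
```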
The method always @@ -503,6 +510,7 @@ func (client PacketCapturesClient) StopPreparer(resourceGroupName string, networ func (client PacketCapturesClient) StopSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/publicipaddresses.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/publicipaddresses.go index af7e7ec85b66..63733fbd4824 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/publicipaddresses.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/publicipaddresses.go @@ -122,6 +122,7 @@ func (client PublicIPAddressesClient) CreateOrUpdatePreparer(resourceGroupName s func (client PublicIPAddressesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -203,6 +204,7 @@ func (client PublicIPAddressesClient) DeletePreparer(resourceGroupName string, p func (client PublicIPAddressesClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -271,7 +273,9 @@ func (client PublicIPAddressesClient) GetPreparer(resourceGroupName string, publ // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client PublicIPAddressesClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -346,7 +350,9 @@ func (client PublicIPAddressesClient) GetVirtualMachineScaleSetPublicIPAddressPr // GetVirtualMachineScaleSetPublicIPAddressSender sends the GetVirtualMachineScaleSetPublicIPAddress request. The method will close the // http.Response Body if it receives an error. func (client PublicIPAddressesClient) GetVirtualMachineScaleSetPublicIPAddressSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetVirtualMachineScaleSetPublicIPAddressResponder handles the response to the GetVirtualMachineScaleSetPublicIPAddress request. The method always @@ -410,7 +416,9 @@ func (client PublicIPAddressesClient) ListPreparer(resourceGroupName string) (*h // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client PublicIPAddressesClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always @@ -540,7 +548,9 @@ func (client PublicIPAddressesClient) ListAllPreparer() (*http.Request, error) { // ListAllSender sends the ListAll request. The method will close the // http.Response Body if it receives an error. 
func (client PublicIPAddressesClient) ListAllSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListAllResponder handles the response to the ListAll request. The method always @@ -676,7 +686,9 @@ func (client PublicIPAddressesClient) ListVirtualMachineScaleSetPublicIPAddresse // ListVirtualMachineScaleSetPublicIPAddressesSender sends the ListVirtualMachineScaleSetPublicIPAddresses request. The method will close the // http.Response Body if it receives an error. func (client PublicIPAddressesClient) ListVirtualMachineScaleSetPublicIPAddressesSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListVirtualMachineScaleSetPublicIPAddressesResponder handles the response to the ListVirtualMachineScaleSetPublicIPAddresses request. The method always @@ -816,7 +828,9 @@ func (client PublicIPAddressesClient) ListVirtualMachineScaleSetVMPublicIPAddres // ListVirtualMachineScaleSetVMPublicIPAddressesSender sends the ListVirtualMachineScaleSetVMPublicIPAddresses request. The method will close the // http.Response Body if it receives an error. func (client PublicIPAddressesClient) ListVirtualMachineScaleSetVMPublicIPAddressesSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListVirtualMachineScaleSetVMPublicIPAddressesResponder handles the response to the ListVirtualMachineScaleSetVMPublicIPAddresses request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routefilterrules.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routefilterrules.go index 7c0ab17904c8..b3dc3883bec4 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routefilterrules.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routefilterrules.go @@ -122,6 +122,7 @@ func (client RouteFilterRulesClient) CreateOrUpdatePreparer(resourceGroupName st func (client RouteFilterRulesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -206,6 +207,7 @@ func (client RouteFilterRulesClient) DeletePreparer(resourceGroupName string, ro func (client RouteFilterRulesClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -272,7 +274,9 @@ func (client RouteFilterRulesClient) GetPreparer(resourceGroupName string, route // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client RouteFilterRulesClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -337,7 +341,9 @@ func (client RouteFilterRulesClient) ListByRouteFilterPreparer(resourceGroupName // ListByRouteFilterSender sends the ListByRouteFilter request. 
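These send decorators compose freely in `SendWithSender` because a `SendDecorator` is just a function over `autorest.Sender`. To make the shape concrete, here is a hypothetical logging decorator; `logRequests` is invented for illustration, while `autorest.Sender`, `autorest.SenderFunc`, and `autorest.SendDecorator` are the real go-autorest types:

```go
package example

import (
	"log"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

// logRequests is a hypothetical decorator: a SendDecorator is just
// func(autorest.Sender) autorest.Sender, and SenderFunc adapts an
// ordinary function to the Sender interface.
func logRequests() autorest.SendDecorator {
	return func(s autorest.Sender) autorest.Sender {
		return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) {
			log.Printf("%s %s", r.Method, r.URL)
			return s.Do(r) // delegate to the wrapped Sender
		})
	}
}
```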
The method will close the // http.Response Body if it receives an error. func (client RouteFilterRulesClient) ListByRouteFilterSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListByRouteFilterResponder handles the response to the ListByRouteFilter request. The method always @@ -493,6 +499,7 @@ func (client RouteFilterRulesClient) UpdatePreparer(resourceGroupName string, ro func (client RouteFilterRulesClient) UpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routefilters.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routefilters.go index 711e58fc9c98..0d35b6d9c06c 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routefilters.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routefilters.go @@ -107,6 +107,7 @@ func (client RouteFiltersClient) CreateOrUpdatePreparer(resourceGroupName string func (client RouteFiltersClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -188,6 +189,7 @@ func (client RouteFiltersClient) DeletePreparer(resourceGroupName string, routeF func (client RouteFiltersClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -256,7 +258,9 @@ func (client RouteFiltersClient) GetPreparer(resourceGroupName string, routeFilt // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client RouteFiltersClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -317,7 +321,9 @@ func (client RouteFiltersClient) ListPreparer() (*http.Request, error) { // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client RouteFiltersClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always @@ -450,7 +456,9 @@ func (client RouteFiltersClient) ListByResourceGroupPreparer(resourceGroupName s // ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the // http.Response Body if it receives an error. func (client RouteFiltersClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListByResourceGroupResponder handles the response to the ListByResourceGroup request. 
The method always @@ -604,6 +612,7 @@ func (client RouteFiltersClient) UpdatePreparer(resourceGroupName string, routeF func (client RouteFiltersClient) UpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routes.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routes.go index 0dcde6fa01d0..48d11f3afce2 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routes.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routes.go @@ -108,6 +108,7 @@ func (client RoutesClient) CreateOrUpdatePreparer(resourceGroupName string, rout func (client RoutesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -192,6 +193,7 @@ func (client RoutesClient) DeletePreparer(resourceGroupName string, routeTableNa func (client RoutesClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -258,7 +260,9 @@ func (client RoutesClient) GetPreparer(resourceGroupName string, routeTableName // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client RoutesClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -323,7 +327,9 @@ func (client RoutesClient) ListPreparer(resourceGroupName string, routeTableName // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client RoutesClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routetables.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routetables.go index e9b557bc0b37..3a2f4ca69e6c 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routetables.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routetables.go @@ -107,6 +107,7 @@ func (client RouteTablesClient) CreateOrUpdatePreparer(resourceGroupName string, func (client RouteTablesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -188,6 +189,7 @@ func (client RouteTablesClient) DeletePreparer(resourceGroupName string, routeTa func (client RouteTablesClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -256,7 +258,9 @@ func (client RouteTablesClient) GetPreparer(resourceGroupName string, routeTable // GetSender sends the Get request. 
The method will close the // http.Response Body if it receives an error. func (client RouteTablesClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -320,7 +324,9 @@ func (client RouteTablesClient) ListPreparer(resourceGroupName string) (*http.Re // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client RouteTablesClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always @@ -450,7 +456,9 @@ func (client RouteTablesClient) ListAllPreparer() (*http.Request, error) { // ListAllSender sends the ListAll request. The method will close the // http.Response Body if it receives an error. func (client RouteTablesClient) ListAllSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListAllResponder handles the response to the ListAll request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securitygroups.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securitygroups.go index b1a5ae4a6e29..3606e6d82c89 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securitygroups.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securitygroups.go @@ -107,6 +107,7 @@ func (client SecurityGroupsClient) CreateOrUpdatePreparer(resourceGroupName stri func (client SecurityGroupsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -189,6 +190,7 @@ func (client SecurityGroupsClient) DeletePreparer(resourceGroupName string, netw func (client SecurityGroupsClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -257,7 +259,9 @@ func (client SecurityGroupsClient) GetPreparer(resourceGroupName string, network // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client SecurityGroupsClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -321,7 +325,9 @@ func (client SecurityGroupsClient) ListPreparer(resourceGroupName string) (*http // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client SecurityGroupsClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. 
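The decorators pull their behavior from fields on the embedded `autorest.Client`, so callers can tune retries and polling without touching the generated senders. A sketch, with `subID` as a placeholder: `RetryAttempts`/`RetryDuration` feed the status-code retry used by the storage operations client later in this patch, and `PollingDelay` paces the async poller.

```go
package example

import (
	"time"

	"github.com/Azure/azure-sdk-for-go/arm/network"
)

// newTunedClient builds a generated client and adjusts the knobs the
// send decorators read from the embedded autorest.Client.
func newTunedClient(subID string) network.SecurityGroupsClient {
	c := network.NewSecurityGroupsClient(subID)
	c.RetryAttempts = 5               // attempts for status-code retries
	c.RetryDuration = 2 * time.Second // backoff between attempts
	c.PollingDelay = 10 * time.Second // pause between async polls
	return c
}
```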
The method always @@ -451,7 +457,9 @@ func (client SecurityGroupsClient) ListAllPreparer() (*http.Request, error) { // ListAllSender sends the ListAll request. The method will close the // http.Response Body if it receives an error. func (client SecurityGroupsClient) ListAllSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListAllResponder handles the response to the ListAll request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securityrules.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securityrules.go index 9c76ef54e8b9..6fcecae5f9c1 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securityrules.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securityrules.go @@ -122,6 +122,7 @@ func (client SecurityRulesClient) CreateOrUpdatePreparer(resourceGroupName strin func (client SecurityRulesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -205,6 +206,7 @@ func (client SecurityRulesClient) DeletePreparer(resourceGroupName string, netwo func (client SecurityRulesClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -271,7 +273,9 @@ func (client SecurityRulesClient) GetPreparer(resourceGroupName string, networkS // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client SecurityRulesClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -337,7 +341,9 @@ func (client SecurityRulesClient) ListPreparer(resourceGroupName string, network // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client SecurityRulesClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. 
The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/subnets.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/subnets.go index 62a1789e591d..298cb989ffa8 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/subnets.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/subnets.go @@ -109,6 +109,7 @@ func (client SubnetsClient) CreateOrUpdatePreparer(resourceGroupName string, vir func (client SubnetsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -192,6 +193,7 @@ func (client SubnetsClient) DeletePreparer(resourceGroupName string, virtualNetw func (client SubnetsClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -261,7 +263,9 @@ func (client SubnetsClient) GetPreparer(resourceGroupName string, virtualNetwork // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client SubnetsClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -326,7 +330,9 @@ func (client SubnetsClient) ListPreparer(resourceGroupName string, virtualNetwor // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client SubnetsClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/usages.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/usages.go index 85c220065404..ab2c9b8fcb3e 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/usages.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/usages.go @@ -93,7 +93,9 @@ func (client UsagesClient) ListPreparer(location string) (*http.Request, error) // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client UsagesClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/version.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/version.go index 7f510b4e4c6d..423a7117d8ff 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/version.go @@ -19,10 +19,10 @@ package network // UserAgent returns the UserAgent string to use when sending http.Requests. func UserAgent() string { - return "Azure-SDK-For-Go/v11.0.0-beta arm-network/" + return "Azure-SDK-For-Go/v12.4.0-beta arm-network/" } // Version returns the semantic version (see http://semver.org) of the client. 
func Version() string { - return "v11.0.0-beta" + return "v12.4.0-beta" } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgatewayconnections.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgatewayconnections.go index 7db4d5de6c38..faac6f8a6797 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgatewayconnections.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgatewayconnections.go @@ -126,6 +126,7 @@ func (client VirtualNetworkGatewayConnectionsClient) CreateOrUpdatePreparer(reso func (client VirtualNetworkGatewayConnectionsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -209,6 +210,7 @@ func (client VirtualNetworkGatewayConnectionsClient) DeletePreparer(resourceGrou func (client VirtualNetworkGatewayConnectionsClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -274,7 +276,9 @@ func (client VirtualNetworkGatewayConnectionsClient) GetPreparer(resourceGroupNa // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client VirtualNetworkGatewayConnectionsClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -341,7 +345,9 @@ func (client VirtualNetworkGatewayConnectionsClient) GetSharedKeyPreparer(resour // GetSharedKeySender sends the GetSharedKey request. The method will close the // http.Response Body if it receives an error. func (client VirtualNetworkGatewayConnectionsClient) GetSharedKeySender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetSharedKeyResponder handles the response to the GetSharedKey request. The method always @@ -406,7 +412,9 @@ func (client VirtualNetworkGatewayConnectionsClient) ListPreparer(resourceGroupN // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client VirtualNetworkGatewayConnectionsClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. 
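The version bump is observable at runtime as well; a trivial check that the vendored tree is the expected build:

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/arm/network"
)

func main() {
	// After this patch both helpers report the v12.4.0-beta vendored build.
	fmt.Println(network.UserAgent()) // Azure-SDK-For-Go/v12.4.0-beta arm-network/
	fmt.Println(network.Version())   // v12.4.0-beta
}
```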
The method always @@ -574,6 +582,7 @@ func (client VirtualNetworkGatewayConnectionsClient) ResetSharedKeyPreparer(reso func (client VirtualNetworkGatewayConnectionsClient) ResetSharedKeySender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -670,6 +679,7 @@ func (client VirtualNetworkGatewayConnectionsClient) SetSharedKeyPreparer(resour func (client VirtualNetworkGatewayConnectionsClient) SetSharedKeySender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgateways.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgateways.go index 5f56a204badf..6edd19644c5b 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgateways.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgateways.go @@ -117,6 +117,7 @@ func (client VirtualNetworkGatewaysClient) CreateOrUpdatePreparer(resourceGroupN func (client VirtualNetworkGatewaysClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -200,6 +201,7 @@ func (client VirtualNetworkGatewaysClient) DeletePreparer(resourceGroupName stri func (client VirtualNetworkGatewaysClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -284,6 +286,7 @@ func (client VirtualNetworkGatewaysClient) GeneratevpnclientpackagePreparer(reso func (client VirtualNetworkGatewaysClient) GeneratevpnclientpackageSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -370,6 +373,7 @@ func (client VirtualNetworkGatewaysClient) GenerateVpnProfilePreparer(resourceGr func (client VirtualNetworkGatewaysClient) GenerateVpnProfileSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -436,7 +440,9 @@ func (client VirtualNetworkGatewaysClient) GetPreparer(resourceGroupName string, // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client VirtualNetworkGatewaysClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. 
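Note that the Sender changes leave the Responder half of each pipeline untouched; the generated responders keep pairing `autorest.Respond` decorators the same way (the full shape is visible in the new availabilitysets.go at the end of this patch). A sketch of that standard responder chain, omitting the generated code's `client.ByInspecting()` step for brevity:

```go
package example

import (
	"net/http"

	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/azure"
)

// respondJSON mirrors the generated *Responder methods: enforce the
// expected status code, unmarshal the JSON body into out, and close
// the response body.
func respondJSON(resp *http.Response, out interface{}) error {
	return autorest.Respond(
		resp,
		azure.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(out),
		autorest.ByClosing())
}
```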
The method always @@ -520,6 +526,7 @@ func (client VirtualNetworkGatewaysClient) GetAdvertisedRoutesPreparer(resourceG func (client VirtualNetworkGatewaysClient) GetAdvertisedRoutesSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -606,6 +613,7 @@ func (client VirtualNetworkGatewaysClient) GetBgpPeerStatusPreparer(resourceGrou func (client VirtualNetworkGatewaysClient) GetBgpPeerStatusSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -689,6 +697,7 @@ func (client VirtualNetworkGatewaysClient) GetLearnedRoutesPreparer(resourceGrou func (client VirtualNetworkGatewaysClient) GetLearnedRoutesSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -773,6 +782,7 @@ func (client VirtualNetworkGatewaysClient) GetVpnProfilePackageURLPreparer(resou func (client VirtualNetworkGatewaysClient) GetVpnProfilePackageURLSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -837,7 +847,9 @@ func (client VirtualNetworkGatewaysClient) ListPreparer(resourceGroupName string // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client VirtualNetworkGatewaysClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always @@ -972,7 +984,9 @@ func (client VirtualNetworkGatewaysClient) ListConnectionsPreparer(resourceGroup // ListConnectionsSender sends the ListConnections request. The method will close the // http.Response Body if it receives an error. func (client VirtualNetworkGatewaysClient) ListConnectionsSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListConnectionsResponder handles the response to the ListConnections request. The method always @@ -1128,6 +1142,7 @@ func (client VirtualNetworkGatewaysClient) ResetPreparer(resourceGroupName strin func (client VirtualNetworkGatewaysClient) ResetSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -1194,7 +1209,9 @@ func (client VirtualNetworkGatewaysClient) SupportedVpnDevicesPreparer(resourceG // SupportedVpnDevicesSender sends the SupportedVpnDevices request. The method will close the // http.Response Body if it receives an error. func (client VirtualNetworkGatewaysClient) SupportedVpnDevicesSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // SupportedVpnDevicesResponder handles the response to the SupportedVpnDevices request. 
The method always @@ -1263,7 +1280,9 @@ func (client VirtualNetworkGatewaysClient) VpnDeviceConfigurationScriptPreparer( // VpnDeviceConfigurationScriptSender sends the VpnDeviceConfigurationScript request. The method will close the // http.Response Body if it receives an error. func (client VirtualNetworkGatewaysClient) VpnDeviceConfigurationScriptSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // VpnDeviceConfigurationScriptResponder handles the response to the VpnDeviceConfigurationScript request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkpeerings.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkpeerings.go index 0a945c1bb8d7..30bed99c02cd 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkpeerings.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkpeerings.go @@ -109,6 +109,7 @@ func (client VirtualNetworkPeeringsClient) CreateOrUpdatePreparer(resourceGroupN func (client VirtualNetworkPeeringsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -193,6 +194,7 @@ func (client VirtualNetworkPeeringsClient) DeletePreparer(resourceGroupName stri func (client VirtualNetworkPeeringsClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -259,7 +261,9 @@ func (client VirtualNetworkPeeringsClient) GetPreparer(resourceGroupName string, // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client VirtualNetworkPeeringsClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -324,7 +328,9 @@ func (client VirtualNetworkPeeringsClient) ListPreparer(resourceGroupName string // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client VirtualNetworkPeeringsClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworks.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworks.go index 58ac8ab11229..47c71480661f 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworks.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworks.go @@ -91,7 +91,9 @@ func (client VirtualNetworksClient) CheckIPAddressAvailabilityPreparer(resourceG // CheckIPAddressAvailabilitySender sends the CheckIPAddressAvailability request. The method will close the // http.Response Body if it receives an error. 
func (client VirtualNetworksClient) CheckIPAddressAvailabilitySender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // CheckIPAddressAvailabilityResponder handles the response to the CheckIPAddressAvailability request. The method always @@ -176,6 +178,7 @@ func (client VirtualNetworksClient) CreateOrUpdatePreparer(resourceGroupName str func (client VirtualNetworksClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -257,6 +260,7 @@ func (client VirtualNetworksClient) DeletePreparer(resourceGroupName string, vir func (client VirtualNetworksClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -325,7 +329,9 @@ func (client VirtualNetworksClient) GetPreparer(resourceGroupName string, virtua // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client VirtualNetworksClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -389,7 +395,9 @@ func (client VirtualNetworksClient) ListPreparer(resourceGroupName string) (*htt // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client VirtualNetworksClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always @@ -519,7 +527,9 @@ func (client VirtualNetworksClient) ListAllPreparer() (*http.Request, error) { // ListAllSender sends the ListAll request. The method will close the // http.Response Body if it receives an error. func (client VirtualNetworksClient) ListAllSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListAllResponder handles the response to the ListAll request. The method always @@ -653,7 +663,9 @@ func (client VirtualNetworksClient) ListUsagePreparer(resourceGroupName string, // ListUsageSender sends the ListUsage request. The method will close the // http.Response Body if it receives an error. func (client VirtualNetworksClient) ListUsageSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListUsageResponder handles the response to the ListUsage request. 
The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/watchers.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/watchers.go index 798d8c1bb221..b075417ddbee 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/watchers.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/watchers.go @@ -120,6 +120,7 @@ func (client WatchersClient) CheckConnectivityPreparer(resourceGroupName string, func (client WatchersClient) CheckConnectivitySender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -188,7 +189,9 @@ func (client WatchersClient) CreateOrUpdatePreparer(resourceGroupName string, ne // CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the // http.Response Body if it receives an error. func (client WatchersClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always @@ -270,6 +273,7 @@ func (client WatchersClient) DeletePreparer(resourceGroupName string, networkWat func (client WatchersClient) DeleteSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -334,7 +338,9 @@ func (client WatchersClient) GetPreparer(resourceGroupName string, networkWatche // GetSender sends the Get request. The method will close the // http.Response Body if it receives an error. func (client WatchersClient) GetSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always @@ -431,6 +437,7 @@ func (client WatchersClient) GetAzureReachabilityReportPreparer(resourceGroupNam func (client WatchersClient) GetAzureReachabilityReportSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -525,6 +532,7 @@ func (client WatchersClient) GetFlowLogStatusPreparer(resourceGroupName string, func (client WatchersClient) GetFlowLogStatusSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -620,6 +628,7 @@ func (client WatchersClient) GetNextHopPreparer(resourceGroupName string, networ func (client WatchersClient) GetNextHopSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -694,7 +703,9 @@ func (client WatchersClient) GetTopologyPreparer(resourceGroupName string, netwo // GetTopologySender sends the GetTopology request. The method will close the // http.Response Body if it receives an error. 
func (client WatchersClient) GetTopologySender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetTopologyResponder handles the response to the GetTopology request. The method always @@ -792,6 +803,7 @@ func (client WatchersClient) GetTroubleshootingPreparer(resourceGroupName string func (client WatchersClient) GetTroubleshootingSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -886,6 +898,7 @@ func (client WatchersClient) GetTroubleshootingResultPreparer(resourceGroupName func (client WatchersClient) GetTroubleshootingResultSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -980,6 +993,7 @@ func (client WatchersClient) GetVMSecurityRulesPreparer(resourceGroupName string func (client WatchersClient) GetVMSecurityRulesSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -1044,7 +1058,9 @@ func (client WatchersClient) ListPreparer(resourceGroupName string) (*http.Reque // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client WatchersClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always @@ -1105,7 +1121,9 @@ func (client WatchersClient) ListAllPreparer() (*http.Request, error) { // ListAllSender sends the ListAll request. The method will close the // http.Response Body if it receives an error. func (client WatchersClient) ListAllSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListAllResponder handles the response to the ListAll request. 
The method always @@ -1190,6 +1208,7 @@ func (client WatchersClient) ListAvailableProvidersPreparer(resourceGroupName st func (client WatchersClient) ListAvailableProvidersSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -1288,6 +1307,7 @@ func (client WatchersClient) SetFlowLogConfigurationPreparer(resourceGroupName s func (client WatchersClient) SetFlowLogConfigurationSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -1386,6 +1406,7 @@ func (client WatchersClient) VerifyIPFlowPreparer(resourceGroupName string, netw func (client WatchersClient) VerifyIPFlowSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/accounts.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/accounts.go index 0870a03ded6e..dfa59f957837 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/accounts.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/accounts.go @@ -96,7 +96,9 @@ func (client AccountsClient) CheckNameAvailabilityPreparer(accountName AccountCh // CheckNameAvailabilitySender sends the CheckNameAvailability request. The method will close the // http.Response Body if it receives an error. func (client AccountsClient) CheckNameAvailabilitySender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // CheckNameAvailabilityResponder handles the response to the CheckNameAvailability request. The method always @@ -208,6 +210,7 @@ func (client AccountsClient) CreatePreparer(resourceGroupName string, accountNam func (client AccountsClient) CreateSender(req *http.Request) (*http.Response, error) { return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client), azure.DoPollForAsynchronous(client.PollingDelay)) } @@ -286,7 +289,9 @@ func (client AccountsClient) DeletePreparer(resourceGroupName string, accountNam // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client AccountsClient) DeleteSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // DeleteResponder handles the response to the Delete request. The method always @@ -364,7 +369,9 @@ func (client AccountsClient) GetPropertiesPreparer(resourceGroupName string, acc // GetPropertiesSender sends the GetProperties request. The method will close the // http.Response Body if it receives an error. func (client AccountsClient) GetPropertiesSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // GetPropertiesResponder handles the response to the GetProperties request. The method always @@ -426,7 +433,9 @@ func (client AccountsClient) ListPreparer() (*http.Request, error) { // ListSender sends the List request. 
The method will close the // http.Response Body if it receives an error. func (client AccountsClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always @@ -509,7 +518,9 @@ func (client AccountsClient) ListAccountSASPreparer(resourceGroupName string, ac // ListAccountSASSender sends the ListAccountSAS request. The method will close the // http.Response Body if it receives an error. func (client AccountsClient) ListAccountSASSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListAccountSASResponder handles the response to the ListAccountSAS request. The method always @@ -582,7 +593,9 @@ func (client AccountsClient) ListByResourceGroupPreparer(resourceGroupName strin // ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the // http.Response Body if it receives an error. func (client AccountsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always @@ -660,7 +673,9 @@ func (client AccountsClient) ListKeysPreparer(resourceGroupName string, accountN // ListKeysSender sends the ListKeys request. The method will close the // http.Response Body if it receives an error. func (client AccountsClient) ListKeysSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListKeysResponder handles the response to the ListKeys request. The method always @@ -745,7 +760,9 @@ func (client AccountsClient) ListServiceSASPreparer(resourceGroupName string, ac // ListServiceSASSender sends the ListServiceSAS request. The method will close the // http.Response Body if it receives an error. func (client AccountsClient) ListServiceSASSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListServiceSASResponder handles the response to the ListServiceSAS request. The method always @@ -828,7 +845,9 @@ func (client AccountsClient) RegenerateKeyPreparer(resourceGroupName string, acc // RegenerateKeySender sends the RegenerateKey request. The method will close the // http.Response Body if it receives an error. func (client AccountsClient) RegenerateKeySender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // RegenerateKeyResponder handles the response to the RegenerateKey request. The method always @@ -914,7 +933,9 @@ func (client AccountsClient) UpdatePreparer(resourceGroupName string, accountNam // UpdateSender sends the Update request. The method will close the // http.Response Body if it receives an error. 
func (client AccountsClient) UpdateSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // UpdateResponder handles the response to the Update request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/client.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/client.go index 133386ddbeb6..1c54682152d8 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/client.go @@ -1,6 +1,8 @@ // Package storage implements the Azure ARM Storage service API version 2017-06-01. // // The Azure Storage Management API. +// +// Deprecated: Please instead use github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-06-01/storage package storage // Copyright (c) Microsoft and contributors. All rights reserved. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/operations.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/operations.go index cc46c6997920..ca46f8e13601 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/operations.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/operations.go @@ -79,7 +79,9 @@ func (client OperationsClient) ListPreparer() (*http.Request, error) { // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client OperationsClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) } // ListResponder handles the response to the List request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/skus.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/skus.go index 94d4d6f83ec5..cfad757fb223 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/skus.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/skus.go @@ -83,7 +83,9 @@ func (client SkusClient) ListPreparer() (*http.Request, error) { // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client SkusClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/usage.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/usage.go index 682e5c16c362..933c5d9617e8 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/usage.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/usage.go @@ -83,7 +83,9 @@ func (client UsageClient) ListPreparer() (*http.Request, error) { // ListSender sends the List request. The method will close the // http.Response Body if it receives an error. func (client UsageClient) ListSender(req *http.Request) (*http.Response, error) { - return autorest.SendWithSender(client, req) + return autorest.SendWithSender(client, + req, + azure.DoRetryWithRegistration(client.Client)) } // ListResponder handles the response to the List request. 
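Two details in the storage hunks are easy to miss. First, arm/storage is now marked deprecated in favor of the profile-style import path (github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-06-01/storage), so new code should switch import paths even though the old package still builds. Second, `OperationsClient.ListSender` retries on transient status codes rather than registration failures, presumably because listing available operations does not require a registered provider. A sketch of that decorator in isolation, using autorest's default retryable code set:

```go
package example

import (
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

// retryTransient mirrors the storage OperationsClient.ListSender chain:
// DoRetryForStatusCodes retries up to RetryAttempts times, sleeping
// RetryDuration between tries, for autorest's default retryable codes
// (throttling and transient server errors).
func retryTransient(client autorest.Client, req *http.Request) (*http.Response, error) {
	return autorest.SendWithSender(client, req,
		autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration,
			autorest.StatusCodesForRetry...))
}
```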
The method always diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/version.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/version.go index 467102d59734..47e1b25d4059 100755 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/version.go @@ -19,10 +19,10 @@ package storage // UserAgent returns the UserAgent string to use when sending http.Requests. func UserAgent() string { - return "Azure-SDK-For-Go/v11.0.0-beta arm-storage/2017-06-01" + return "Azure-SDK-For-Go/v12.4.0-beta arm-storage/2017-06-01" } // Version returns the semantic version (see http://semver.org) of the client. func Version() string { - return "v11.0.0-beta" + return "v12.4.0-beta" } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/availabilitysets.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/availabilitysets.go new file mode 100644 index 000000000000..f1f4cb5eb995 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/availabilitysets.go @@ -0,0 +1,373 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// AvailabilitySetsClient is the compute Client +type AvailabilitySetsClient struct { + BaseClient +} + +// NewAvailabilitySetsClient creates an instance of the AvailabilitySetsClient client. +func NewAvailabilitySetsClient(subscriptionID string) AvailabilitySetsClient { + return NewAvailabilitySetsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewAvailabilitySetsClientWithBaseURI creates an instance of the AvailabilitySetsClient client. +func NewAvailabilitySetsClientWithBaseURI(baseURI string, subscriptionID string) AvailabilitySetsClient { + return AvailabilitySetsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate create or update an availability set. +// +// resourceGroupName is the name of the resource group. availabilitySetName is the name of the availability set. +// parameters is parameters supplied to the Create Availability Set operation. 
+func (client AvailabilitySetsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, availabilitySetName string, parameters AvailabilitySet) (result AvailabilitySet, err error) { + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, availabilitySetName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client AvailabilitySetsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, availabilitySetName string, parameters AvailabilitySet) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "availabilitySetName": autorest.Encode("path", availabilitySetName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client AvailabilitySetsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client AvailabilitySetsClient) CreateOrUpdateResponder(resp *http.Response) (result AvailabilitySet, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete delete an availability set. +// +// resourceGroupName is the name of the resource group. availabilitySetName is the name of the availability set. 
+func (client AvailabilitySetsClient) Delete(ctx context.Context, resourceGroupName string, availabilitySetName string) (result OperationStatusResponse, err error) { + req, err := client.DeletePreparer(ctx, resourceGroupName, availabilitySetName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client AvailabilitySetsClient) DeletePreparer(ctx context.Context, resourceGroupName string, availabilitySetName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "availabilitySetName": autorest.Encode("path", availabilitySetName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client AvailabilitySetsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client AvailabilitySetsClient) DeleteResponder(resp *http.Response) (result OperationStatusResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Get retrieves information about an availability set. +// +// resourceGroupName is the name of the resource group. availabilitySetName is the name of the availability set. 
+func (client AvailabilitySetsClient) Get(ctx context.Context, resourceGroupName string, availabilitySetName string) (result AvailabilitySet, err error) { + req, err := client.GetPreparer(ctx, resourceGroupName, availabilitySetName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client AvailabilitySetsClient) GetPreparer(ctx context.Context, resourceGroupName string, availabilitySetName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "availabilitySetName": autorest.Encode("path", availabilitySetName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client AvailabilitySetsClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client AvailabilitySetsClient) GetResponder(resp *http.Response) (result AvailabilitySet, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List lists all availability sets in a resource group. +// +// resourceGroupName is the name of the resource group. +func (client AvailabilitySetsClient) List(ctx context.Context, resourceGroupName string) (result AvailabilitySetListResult, err error) { + req, err := client.ListPreparer(ctx, resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "List", resp, "Failure sending request") + return + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client AvailabilitySetsClient) ListPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client AvailabilitySetsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client AvailabilitySetsClient) ListResponder(resp *http.Response) (result AvailabilitySetListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListAvailableSizes lists all available virtual machine sizes that can be used to create a new virtual machine in an +// existing availability set. +// +// resourceGroupName is the name of the resource group. availabilitySetName is the name of the availability set. +func (client AvailabilitySetsClient) ListAvailableSizes(ctx context.Context, resourceGroupName string, availabilitySetName string) (result VirtualMachineSizeListResult, err error) { + req, err := client.ListAvailableSizesPreparer(ctx, resourceGroupName, availabilitySetName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "ListAvailableSizes", nil, "Failure preparing request") + return + } + + resp, err := client.ListAvailableSizesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "ListAvailableSizes", resp, "Failure sending request") + return + } + + result, err = client.ListAvailableSizesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.AvailabilitySetsClient", "ListAvailableSizes", resp, "Failure responding to request") + } + + return +} + +// ListAvailableSizesPreparer prepares the ListAvailableSizes request. 
+func (client AvailabilitySetsClient) ListAvailableSizesPreparer(ctx context.Context, resourceGroupName string, availabilitySetName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "availabilitySetName": autorest.Encode("path", availabilitySetName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/availabilitySets/{availabilitySetName}/vmSizes", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListAvailableSizesSender sends the ListAvailableSizes request. The method will close the +// http.Response Body if it receives an error. +func (client AvailabilitySetsClient) ListAvailableSizesSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListAvailableSizesResponder handles the response to the ListAvailableSizes request. The method always +// closes the http.Response Body. +func (client AvailabilitySetsClient) ListAvailableSizesResponder(resp *http.Response) (result VirtualMachineSizeListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/client.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/client.go new file mode 100644 index 000000000000..b23c9ca74268 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/client.go @@ -0,0 +1,51 @@ +// Package compute implements the Azure ARM Compute service API version . +// +// Compute Client +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" +) + +const ( + // DefaultBaseURI is the default URI used for the service Compute + DefaultBaseURI = "https://management.azure.com" +) + +// BaseClient is the base client for Compute. +type BaseClient struct { + autorest.Client + BaseURI string + SubscriptionID string +} + +// New creates an instance of the BaseClient client. 
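Since client.go introduces the BaseClient that every typed client in this package embeds, here is a hedged sketch of how one of these generated clients is typically constructed and called; the service principal token and the resource names are placeholders, not part of the diff:

// Sketch: building an authenticated AvailabilitySetsClient and calling it.
package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/adal"
)

func listSizes(ctx context.Context, subID string, token *adal.ServicePrincipalToken) error {
	// NewAvailabilitySetsClient wraps BaseClient with the default base URI.
	client := compute.NewAvailabilitySetsClient(subID)
	// Authorizer is a field on the embedded autorest.Client.
	client.Authorizer = autorest.NewBearerAuthorizer(token)

	result, err := client.ListAvailableSizes(ctx, "myGroup", "myAvSet")
	if err != nil {
		return err
	}
	if result.Value != nil {
		for _, size := range *result.Value {
			fmt.Println(*size.Name)
		}
	}
	return nil
}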
+func New(subscriptionID string) BaseClient { + return NewWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewWithBaseURI creates an instance of the BaseClient client. +func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient { + return BaseClient{ + Client: autorest.NewClientWithUserAgent(UserAgent()), + BaseURI: baseURI, + SubscriptionID: subscriptionID, + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/containerservices.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/containerservices.go new file mode 100644 index 000000000000..7bc59c9ed406 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/containerservices.go @@ -0,0 +1,472 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "net/http" +) + +// ContainerServicesClient is the compute Client +type ContainerServicesClient struct { + BaseClient +} + +// NewContainerServicesClient creates an instance of the ContainerServicesClient client. +func NewContainerServicesClient(subscriptionID string) ContainerServicesClient { + return NewContainerServicesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewContainerServicesClientWithBaseURI creates an instance of the ContainerServicesClient client. +func NewContainerServicesClientWithBaseURI(baseURI string, subscriptionID string) ContainerServicesClient { + return ContainerServicesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates a container service with the specified configuration of orchestrator, masters, and +// agents. +// +// resourceGroupName is the name of the resource group. containerServiceName is the name of the container service in +// the specified subscription and resource group. parameters is parameters supplied to the Create or Update a Container +// Service operation. 
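The CreateOrUpdate body that follows opens with a large client-side validation block: constraints are checked by reflection before any request is sent. The mechanism in miniature, with an invented Profile type and constraints (only the validation package API is real):

// Sketch of the go-autorest validation pattern used by the generated
// CreateOrUpdate below. The Profile type is invented for illustration.
package main

import "github.com/Azure/go-autorest/autorest/validation"

type Profile struct {
	AdminUsername *string
}

func validateProfile(p Profile) error {
	return validation.Validate([]validation.Validation{
		{TargetValue: p,
			Constraints: []validation.Constraint{
				// Rule: true on a Null constraint means "required": a nil
				// AdminUsername fails before any HTTP call is made.
				{Target: "p.AdminUsername", Name: validation.Null, Rule: true,
					Chain: []validation.Constraint{
						// Nested constraints run only once the parent passes.
						{Target: "p.AdminUsername", Name: validation.Pattern, Rule: `^[a-z][a-z0-9_-]*$`, Chain: nil},
					}},
			}},
	})
}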
+func (client ContainerServicesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, containerServiceName string, parameters ContainerService) (result ContainerServicesCreateOrUpdateFuture, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.ContainerServiceProperties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.ContainerServiceProperties.CustomProfile", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.ContainerServiceProperties.CustomProfile.Orchestrator", Name: validation.Null, Rule: true, Chain: nil}}}, + {Target: "parameters.ContainerServiceProperties.ServicePrincipalProfile", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.ContainerServiceProperties.ServicePrincipalProfile.ClientID", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.ContainerServiceProperties.ServicePrincipalProfile.Secret", Name: validation.Null, Rule: true, Chain: nil}, + }}, + {Target: "parameters.ContainerServiceProperties.MasterProfile", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.ContainerServiceProperties.MasterProfile.DNSPrefix", Name: validation.Null, Rule: true, Chain: nil}}}, + {Target: "parameters.ContainerServiceProperties.AgentPoolProfiles", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.ContainerServiceProperties.WindowsProfile", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.ContainerServiceProperties.WindowsProfile.AdminUsername", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.ContainerServiceProperties.WindowsProfile.AdminUsername", Name: validation.Pattern, Rule: `^[a-zA-Z0-9]+([._]?[a-zA-Z0-9]+)*$`, Chain: nil}}}, + {Target: "parameters.ContainerServiceProperties.WindowsProfile.AdminPassword", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.ContainerServiceProperties.WindowsProfile.AdminPassword", Name: validation.Pattern, Rule: `^(?=.*[a-z])(?=.*[A-Z])(?=.*[!@#$%\^&\*\(\)])[a-zA-Z\d!@#$%\^&\*\(\)]{12,123}$`, Chain: nil}}}, + }}, + {Target: "parameters.ContainerServiceProperties.LinuxProfile", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.ContainerServiceProperties.LinuxProfile.AdminUsername", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.ContainerServiceProperties.LinuxProfile.AdminUsername", Name: validation.Pattern, Rule: `^[a-z][a-z0-9_-]*$`, Chain: nil}}}, + {Target: "parameters.ContainerServiceProperties.LinuxProfile.SSH", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.ContainerServiceProperties.LinuxProfile.SSH.PublicKeys", Name: validation.Null, Rule: true, Chain: nil}}}, + }}, + {Target: "parameters.ContainerServiceProperties.DiagnosticsProfile", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.ContainerServiceProperties.DiagnosticsProfile.VMDiagnostics", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.ContainerServiceProperties.DiagnosticsProfile.VMDiagnostics.Enabled", Name: validation.Null, Rule: true, Chain: nil}}}, + }}, + }}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, 
"compute.ContainerServicesClient", "CreateOrUpdate") + } + + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, containerServiceName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result, err = client.CreateOrUpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "CreateOrUpdate", result.Response(), "Failure sending request") + return + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client ContainerServicesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, containerServiceName string, parameters ContainerService) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "containerServiceName": autorest.Encode("path", containerServiceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-01-31" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/containerServices/{containerServiceName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client ContainerServicesClient) CreateOrUpdateSender(req *http.Request) (future ContainerServicesCreateOrUpdateFuture, err error) { + sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client)) + future.Future = azure.NewFuture(req) + future.req = req + _, err = future.Done(sender) + if err != nil { + return + } + err = autorest.Respond(future.Response(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted)) + return +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client ContainerServicesClient) CreateOrUpdateResponder(resp *http.Response) (result ContainerService, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes the specified container service in the specified subscription and resource group. The operation does +// not delete other resources created as part of creating a container service, including storage accounts, VMs, and +// availability sets. All the other resources created with the container service are part of the same resource group +// and can be deleted individually. +// +// resourceGroupName is the name of the resource group. containerServiceName is the name of the container service in +// the specified subscription and resource group. 
+func (client ContainerServicesClient) Delete(ctx context.Context, resourceGroupName string, containerServiceName string) (result ContainerServicesDeleteFuture, err error) { + req, err := client.DeletePreparer(ctx, resourceGroupName, containerServiceName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client ContainerServicesClient) DeletePreparer(ctx context.Context, resourceGroupName string, containerServiceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "containerServiceName": autorest.Encode("path", containerServiceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-01-31" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/containerServices/{containerServiceName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client ContainerServicesClient) DeleteSender(req *http.Request) (future ContainerServicesDeleteFuture, err error) { + sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client)) + future.Future = azure.NewFuture(req) + future.req = req + _, err = future.Done(sender) + if err != nil { + return + } + err = autorest.Respond(future.Response(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent)) + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client ContainerServicesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets the properties of the specified container service in the specified subscription and resource group. The +// operation returns the properties including state, orchestrator, number of masters and agents, and FQDNs of masters +// and agents. +// +// resourceGroupName is the name of the resource group. containerServiceName is the name of the container service in +// the specified subscription and resource group. 
+func (client ContainerServicesClient) Get(ctx context.Context, resourceGroupName string, containerServiceName string) (result ContainerService, err error) { + req, err := client.GetPreparer(ctx, resourceGroupName, containerServiceName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client ContainerServicesClient) GetPreparer(ctx context.Context, resourceGroupName string, containerServiceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "containerServiceName": autorest.Encode("path", containerServiceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-01-31" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/containerServices/{containerServiceName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client ContainerServicesClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client ContainerServicesClient) GetResponder(resp *http.Response) (result ContainerService, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets a list of container services in the specified subscription. The operation returns properties of each +// container service including state, orchestrator, number of masters and agents, and FQDNs of masters and agents. 
+func (client ContainerServicesClient) List(ctx context.Context) (result ContainerServiceListResultPage, err error) { + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.cslr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "List", resp, "Failure sending request") + return + } + + result.cslr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client ContainerServicesClient) ListPreparer(ctx context.Context) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-01-31" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/containerServices", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client ContainerServicesClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client ContainerServicesClient) ListResponder(resp *http.Response) (result ContainerServiceListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. +func (client ContainerServicesClient) listNextResults(lastResults ContainerServiceListResult) (result ContainerServiceListResult, err error) { + req, err := lastResults.containerServiceListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. 
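List wires listNextResults into the returned page so iteration can cross page boundaries, and ListComplete (just below) flattens the pages into an iterator. Both consumption styles, sketched against an assumed authorized client, with method names following this generated code's conventions:

// Sketch: consuming the paged List API page-by-page or via the iterator.
package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
)

func printAll(ctx context.Context, client compute.ContainerServicesClient) error {
	// Page at a time: Next fetches the following page via listNextResults.
	page, err := client.List(ctx)
	for err == nil && page.NotDone() {
		for _, cs := range page.Values() {
			fmt.Println(*cs.Name)
		}
		err = page.Next()
	}
	if err != nil {
		return err
	}
	// Element at a time: the iterator crosses page boundaries itself.
	iter, err := client.ListComplete(ctx)
	for err == nil && iter.NotDone() {
		fmt.Println(*iter.Value().Name)
		err = iter.Next()
	}
	return err
}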
+func (client ContainerServicesClient) ListComplete(ctx context.Context) (result ContainerServiceListResultIterator, err error) { + result.page, err = client.List(ctx) + return +} + +// ListByResourceGroup gets a list of container services in the specified subscription and resource group. The +// operation returns properties of each container service including state, orchestrator, number of masters and agents, +// and FQDNs of masters and agents. +// +// resourceGroupName is the name of the resource group. +func (client ContainerServicesClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result ContainerServiceListResultPage, err error) { + result.fn = client.listByResourceGroupNextResults + req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "ListByResourceGroup", nil, "Failure preparing request") + return + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.cslr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "ListByResourceGroup", resp, "Failure sending request") + return + } + + result.cslr, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "ListByResourceGroup", resp, "Failure responding to request") + } + + return +} + +// ListByResourceGroupPreparer prepares the ListByResourceGroup request. +func (client ContainerServicesClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-01-31" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/containerServices", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the +// http.Response Body if it receives an error. +func (client ContainerServicesClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always +// closes the http.Response Body. +func (client ContainerServicesClient) ListByResourceGroupResponder(resp *http.Response) (result ContainerServiceListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listByResourceGroupNextResults retrieves the next set of results, if any. 
+func (client ContainerServicesClient) listByResourceGroupNextResults(lastResults ContainerServiceListResult) (result ContainerServiceListResult, err error) { + req, err := lastResults.containerServiceListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "listByResourceGroupNextResults", resp, "Failure sending next results request") + } + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ContainerServicesClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required. +func (client ContainerServicesClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string) (result ContainerServiceListResultIterator, err error) { + result.page, err = client.ListByResourceGroup(ctx, resourceGroupName) + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/disks.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/disks.go new file mode 100644 index 000000000000..6b812c6d34f5 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/disks.go @@ -0,0 +1,676 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "net/http" +) + +// DisksClient is the compute Client +type DisksClient struct { + BaseClient +} + +// NewDisksClient creates an instance of the DisksClient client. +func NewDisksClient(subscriptionID string) DisksClient { + return NewDisksClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewDisksClientWithBaseURI creates an instance of the DisksClient client. +func NewDisksClientWithBaseURI(baseURI string, subscriptionID string) DisksClient { + return DisksClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates a disk. +// +// resourceGroupName is the name of the resource group. diskName is the name of the managed disk that is being created. +// The name can't be changed after the disk is created. Supported characters for the name are a-z, A-Z, 0-9 and _. 
The +// maximum name length is 80 characters. disk is disk object supplied in the body of the Put disk operation. +func (client DisksClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, diskName string, disk Disk) (result DisksCreateOrUpdateFuture, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: disk, + Constraints: []validation.Constraint{{Target: "disk.DiskProperties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "disk.DiskProperties.CreationData", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "disk.DiskProperties.CreationData.ImageReference", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "disk.DiskProperties.CreationData.ImageReference.ID", Name: validation.Null, Rule: true, Chain: nil}}}, + }}, + {Target: "disk.DiskProperties.EncryptionSettings", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "disk.DiskProperties.EncryptionSettings.DiskEncryptionKey", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "disk.DiskProperties.EncryptionSettings.DiskEncryptionKey.SourceVault", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "disk.DiskProperties.EncryptionSettings.DiskEncryptionKey.SecretURL", Name: validation.Null, Rule: true, Chain: nil}, + }}, + {Target: "disk.DiskProperties.EncryptionSettings.KeyEncryptionKey", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "disk.DiskProperties.EncryptionSettings.KeyEncryptionKey.SourceVault", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "disk.DiskProperties.EncryptionSettings.KeyEncryptionKey.KeyURL", Name: validation.Null, Rule: true, Chain: nil}, + }}, + }}, + }}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "compute.DisksClient", "CreateOrUpdate") + } + + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, diskName, disk) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result, err = client.CreateOrUpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksClient", "CreateOrUpdate", result.Response(), "Failure sending request") + return + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client DisksClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, diskName string, disk Disk) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "diskName": autorest.Encode("path", diskName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}", pathParameters), + autorest.WithJSON(disk), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. 
+func (client DisksClient) CreateOrUpdateSender(req *http.Request) (future DisksCreateOrUpdateFuture, err error) { + sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client)) + future.Future = azure.NewFuture(req) + future.req = req + _, err = future.Done(sender) + if err != nil { + return + } + err = autorest.Respond(future.Response(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) + return +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client DisksClient) CreateOrUpdateResponder(resp *http.Response) (result Disk, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a disk. +// +// resourceGroupName is the name of the resource group. diskName is the name of the managed disk that is being created. +// The name can't be changed after the disk is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The +// maximum name length is 80 characters. +func (client DisksClient) Delete(ctx context.Context, resourceGroupName string, diskName string) (result DisksDeleteFuture, err error) { + req, err := client.DeletePreparer(ctx, resourceGroupName, diskName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client DisksClient) DeletePreparer(ctx context.Context, resourceGroupName string, diskName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "diskName": autorest.Encode("path", diskName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client DisksClient) DeleteSender(req *http.Request) (future DisksDeleteFuture, err error) { + sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client)) + future.Future = azure.NewFuture(req) + future.req = req + _, err = future.Done(sender) + if err != nil { + return + } + err = autorest.Respond(future.Response(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent)) + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. 
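The Responder half mirrors the Preparer half: autorest.Respond threads the raw response through respond decorators in order. A minimal standalone sketch with an invented result type, checking status codes, unmarshalling, and closing the body:

// Sketch of the respond-decorator chain used by the generated Responders.
// The statusResult type is invented for illustration.
package main

import (
	"net/http"

	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/azure"
)

type statusResult struct {
	Status *string `json:"status,omitempty"`
}

func respond(resp *http.Response) (statusResult, error) {
	var result statusResult
	// Decorators run in order: fail unless an accepted status code, then
	// JSON-decode the body into result, then close the body.
	err := autorest.Respond(
		resp,
		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
		autorest.ByUnmarshallingJSON(&result),
		autorest.ByClosing())
	return result, err
}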
+func (client DisksClient) DeleteResponder(resp *http.Response) (result OperationStatusResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Get gets information about a disk. +// +// resourceGroupName is the name of the resource group. diskName is the name of the managed disk that is being created. +// The name can't be changed after the disk is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The +// maximum name length is 80 characters. +func (client DisksClient) Get(ctx context.Context, resourceGroupName string, diskName string) (result Disk, err error) { + req, err := client.GetPreparer(ctx, resourceGroupName, diskName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.DisksClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client DisksClient) GetPreparer(ctx context.Context, resourceGroupName string, diskName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "diskName": autorest.Encode("path", diskName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client DisksClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client DisksClient) GetResponder(resp *http.Response) (result Disk, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GrantAccess grants access to a disk. +// +// resourceGroupName is the name of the resource group. diskName is the name of the managed disk that is being created. +// The name can't be changed after the disk is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The +// maximum name length is 80 characters. grantAccessData is access data object supplied in the body of the get disk +// access operation. 
+func (client DisksClient) GrantAccess(ctx context.Context, resourceGroupName string, diskName string, grantAccessData GrantAccessData) (result DisksGrantAccessFuture, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: grantAccessData, + Constraints: []validation.Constraint{{Target: "grantAccessData.DurationInSeconds", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "compute.DisksClient", "GrantAccess") + } + + req, err := client.GrantAccessPreparer(ctx, resourceGroupName, diskName, grantAccessData) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksClient", "GrantAccess", nil, "Failure preparing request") + return + } + + result, err = client.GrantAccessSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksClient", "GrantAccess", result.Response(), "Failure sending request") + return + } + + return +} + +// GrantAccessPreparer prepares the GrantAccess request. +func (client DisksClient) GrantAccessPreparer(ctx context.Context, resourceGroupName string, diskName string, grantAccessData GrantAccessData) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "diskName": autorest.Encode("path", diskName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/beginGetAccess", pathParameters), + autorest.WithJSON(grantAccessData), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GrantAccessSender sends the GrantAccess request. The method will close the +// http.Response Body if it receives an error. +func (client DisksClient) GrantAccessSender(req *http.Request) (future DisksGrantAccessFuture, err error) { + sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client)) + future.Future = azure.NewFuture(req) + future.req = req + _, err = future.Done(sender) + if err != nil { + return + } + err = autorest.Respond(future.Response(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) + return +} + +// GrantAccessResponder handles the response to the GrantAccess request. The method always +// closes the http.Response Body. +func (client DisksClient) GrantAccessResponder(resp *http.Response) (result AccessURI, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List lists all the disks under a subscription. 
+func (client DisksClient) List(ctx context.Context) (result DiskListPage, err error) { + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.dl.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.DisksClient", "List", resp, "Failure sending request") + return + } + + result.dl, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client DisksClient) ListPreparer(ctx context.Context) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/disks", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client DisksClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client DisksClient) ListResponder(resp *http.Response) (result DiskList, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. +func (client DisksClient) listNextResults(lastResults DiskList) (result DiskList, err error) { + req, err := lastResults.diskListPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.DisksClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.DisksClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client DisksClient) ListComplete(ctx context.Context) (result DiskListIterator, err error) { + result.page, err = client.List(ctx) + return +} + +// ListByResourceGroup lists all the disks under a resource group. +// +// resourceGroupName is the name of the resource group. 
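+//
+// Illustrative sketch: the DiskListPage returned below can also be walked page by page,
+// assuming the NotDone/Values/Next page methods of this generator vintage:
+//
+//	page, err := client.ListByResourceGroup(ctx, "my-rg")
+//	for err == nil && page.NotDone() {
+//		for _, d := range page.Values() {
+//			_ = d // one Disk per entry in the current page
+//		}
+//		err = page.Next() // follows nextLink via listByResourceGroupNextResults
+//	}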
+func (client DisksClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result DiskListPage, err error) { + result.fn = client.listByResourceGroupNextResults + req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksClient", "ListByResourceGroup", nil, "Failure preparing request") + return + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.dl.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.DisksClient", "ListByResourceGroup", resp, "Failure sending request") + return + } + + result.dl, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksClient", "ListByResourceGroup", resp, "Failure responding to request") + } + + return +} + +// ListByResourceGroupPreparer prepares the ListByResourceGroup request. +func (client DisksClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the +// http.Response Body if it receives an error. +func (client DisksClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always +// closes the http.Response Body. +func (client DisksClient) ListByResourceGroupResponder(resp *http.Response) (result DiskList, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listByResourceGroupNextResults retrieves the next set of results, if any. 
+func (client DisksClient) listByResourceGroupNextResults(lastResults DiskList) (result DiskList, err error) { + req, err := lastResults.diskListPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.DisksClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.DisksClient", "listByResourceGroupNextResults", resp, "Failure sending next results request") + } + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required. +func (client DisksClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string) (result DiskListIterator, err error) { + result.page, err = client.ListByResourceGroup(ctx, resourceGroupName) + return +} + +// RevokeAccess revokes access to a disk. +// +// resourceGroupName is the name of the resource group. diskName is the name of the managed disk that is being created. +// The name can't be changed after the disk is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The +// maximum name length is 80 characters. +func (client DisksClient) RevokeAccess(ctx context.Context, resourceGroupName string, diskName string) (result DisksRevokeAccessFuture, err error) { + req, err := client.RevokeAccessPreparer(ctx, resourceGroupName, diskName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksClient", "RevokeAccess", nil, "Failure preparing request") + return + } + + result, err = client.RevokeAccessSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksClient", "RevokeAccess", result.Response(), "Failure sending request") + return + } + + return +} + +// RevokeAccessPreparer prepares the RevokeAccess request. +func (client DisksClient) RevokeAccessPreparer(ctx context.Context, resourceGroupName string, diskName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "diskName": autorest.Encode("path", diskName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/endGetAccess", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// RevokeAccessSender sends the RevokeAccess request. The method will close the +// http.Response Body if it receives an error. 
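+//
+// Illustrative sketch for the RevokeAccess call above: it is the long-running
+// counterpart to GrantAccess, typically issued once an export or copy has finished.
+// Waiting on the future again assumes WaitForCompletion exists in this go-autorest
+// vintage:
+//
+//	future, err := client.RevokeAccess(ctx, "my-rg", "my-disk")
+//	if err == nil {
+//		err = future.WaitForCompletion(ctx, client.Client)
+//	}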
+func (client DisksClient) RevokeAccessSender(req *http.Request) (future DisksRevokeAccessFuture, err error) { + sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client)) + future.Future = azure.NewFuture(req) + future.req = req + _, err = future.Done(sender) + if err != nil { + return + } + err = autorest.Respond(future.Response(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) + return +} + +// RevokeAccessResponder handles the response to the RevokeAccess request. The method always +// closes the http.Response Body. +func (client DisksClient) RevokeAccessResponder(resp *http.Response) (result OperationStatusResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Update updates (patches) a disk. +// +// resourceGroupName is the name of the resource group. diskName is the name of the managed disk that is being created. +// The name can't be changed after the disk is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The +// maximum name length is 80 characters. disk is disk object supplied in the body of the Patch disk operation. +func (client DisksClient) Update(ctx context.Context, resourceGroupName string, diskName string, disk DiskUpdate) (result DisksUpdateFuture, err error) { + req, err := client.UpdatePreparer(ctx, resourceGroupName, diskName, disk) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksClient", "Update", nil, "Failure preparing request") + return + } + + result, err = client.UpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.DisksClient", "Update", result.Response(), "Failure sending request") + return + } + + return +} + +// UpdatePreparer prepares the Update request. +func (client DisksClient) UpdatePreparer(ctx context.Context, resourceGroupName string, diskName string, disk DiskUpdate) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "diskName": autorest.Encode("path", diskName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}", pathParameters), + autorest.WithJSON(disk), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client DisksClient) UpdateSender(req *http.Request) (future DisksUpdateFuture, err error) { + sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client)) + future.Future = azure.NewFuture(req) + future.req = req + _, err = future.Done(sender) + if err != nil { + return + } + err = autorest.Respond(future.Response(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) + return +} + +// UpdateResponder handles the response to the Update request. 
The method always +// closes the http.Response Body. +func (client DisksClient) UpdateResponder(resp *http.Response) (result Disk, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/images.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/images.go new file mode 100644 index 000000000000..7cdb58154b44 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/images.go @@ -0,0 +1,443 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "net/http" +) + +// ImagesClient is the compute Client +type ImagesClient struct { + BaseClient +} + +// NewImagesClient creates an instance of the ImagesClient client. +func NewImagesClient(subscriptionID string) ImagesClient { + return NewImagesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewImagesClientWithBaseURI creates an instance of the ImagesClient client. +func NewImagesClientWithBaseURI(baseURI string, subscriptionID string) ImagesClient { + return ImagesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate create or update an image. +// +// resourceGroupName is the name of the resource group. imageName is the name of the image. parameters is parameters +// supplied to the Create Image operation. 
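+//
+// Illustrative sketch (struct shapes follow this package's models.go and should be read
+// as assumptions): the validation below requires StorageProfile.OsDisk, so a minimal
+// image captured from a generalized VHD looks roughly like:
+//
+//	client := NewImagesClient("<subscription-id>")
+//	client.Authorizer = authorizer // placeholder autorest.Authorizer
+//	future, err := client.CreateOrUpdate(ctx, "my-rg", "my-image", Image{
+//		Location: to.StringPtr("westus"),
+//		ImageProperties: &ImageProperties{
+//			StorageProfile: &ImageStorageProfile{
+//				OsDisk: &ImageOSDisk{
+//					OsType:  Linux,
+//					OsState: Generalized,
+//					BlobURI: to.StringPtr("https://<account>.blob.core.windows.net/vhds/os.vhd"),
+//				},
+//			},
+//		},
+//	})
+//	// future/err are then handled as in the disk examples earlier in this package.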
+func (client ImagesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, imageName string, parameters Image) (result ImagesCreateOrUpdateFuture, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.ImageProperties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.ImageProperties.StorageProfile", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.ImageProperties.StorageProfile.OsDisk", Name: validation.Null, Rule: true, Chain: nil}}}, + }}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "compute.ImagesClient", "CreateOrUpdate") + } + + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, imageName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ImagesClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result, err = client.CreateOrUpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ImagesClient", "CreateOrUpdate", result.Response(), "Failure sending request") + return + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client ImagesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, imageName string, parameters Image) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "imageName": autorest.Encode("path", imageName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client ImagesClient) CreateOrUpdateSender(req *http.Request) (future ImagesCreateOrUpdateFuture, err error) { + sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client)) + future.Future = azure.NewFuture(req) + future.req = req + _, err = future.Done(sender) + if err != nil { + return + } + err = autorest.Respond(future.Response(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated)) + return +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client ImagesClient) CreateOrUpdateResponder(resp *http.Response) (result Image, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes an Image. +// +// resourceGroupName is the name of the resource group. imageName is the name of the image. 
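+//
+// Illustrative sketch: Delete is long-running as well, and DeleteSender/DeleteResponder
+// below accept 200, 202, and 204 as success:
+//
+//	future, err := client.Delete(ctx, "my-rg", "my-image")
+//	if err == nil {
+//		err = future.WaitForCompletion(ctx, client.Client) // assumed go-autorest helper
+//	}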
+func (client ImagesClient) Delete(ctx context.Context, resourceGroupName string, imageName string) (result ImagesDeleteFuture, err error) { + req, err := client.DeletePreparer(ctx, resourceGroupName, imageName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ImagesClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ImagesClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client ImagesClient) DeletePreparer(ctx context.Context, resourceGroupName string, imageName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "imageName": autorest.Encode("path", imageName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client ImagesClient) DeleteSender(req *http.Request) (future ImagesDeleteFuture, err error) { + sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client)) + future.Future = azure.NewFuture(req) + future.req = req + _, err = future.Done(sender) + if err != nil { + return + } + err = autorest.Respond(future.Response(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent)) + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client ImagesClient) DeleteResponder(resp *http.Response) (result OperationStatusResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Get gets an image. +// +// resourceGroupName is the name of the resource group. imageName is the name of the image. expand is the expand +// expression to apply on the operation. 
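+//
+// Illustrative sketch: expand is optional; GetPreparer below only appends $expand to the
+// query string when it is non-empty, so a plain lookup passes "":
+//
+//	img, err := client.Get(ctx, "my-rg", "my-image", "")
+//	if err == nil && img.ID != nil {
+//		_ = *img.ID // fully qualified resource ID
+//	}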
+func (client ImagesClient) Get(ctx context.Context, resourceGroupName string, imageName string, expand string) (result Image, err error) { + req, err := client.GetPreparer(ctx, resourceGroupName, imageName, expand) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ImagesClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.ImagesClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ImagesClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client ImagesClient) GetPreparer(ctx context.Context, resourceGroupName string, imageName string, expand string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "imageName": autorest.Encode("path", imageName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client ImagesClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client ImagesClient) GetResponder(resp *http.Response) (result Image, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets the list of Images in the subscription. Use nextLink property in the response to get the next page of +// Images. Do this till nextLink is null to fetch all the Images. +func (client ImagesClient) List(ctx context.Context) (result ImageListResultPage, err error) { + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ImagesClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.ilr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.ImagesClient", "List", resp, "Failure sending request") + return + } + + result.ilr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ImagesClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
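+//
+// Illustrative sketch for the List call above: ListComplete (defined below) follows
+// nextLink automatically, so callers rarely chase it by hand; assuming the usual
+// iterator methods of this vintage:
+//
+//	iter, err := client.ListComplete(ctx)
+//	for err == nil && iter.NotDone() {
+//		_ = iter.Value() // one Image per element
+//		err = iter.Next()
+//	}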
+func (client ImagesClient) ListPreparer(ctx context.Context) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/images", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client ImagesClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client ImagesClient) ListResponder(resp *http.Response) (result ImageListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. +func (client ImagesClient) listNextResults(lastResults ImageListResult) (result ImageListResult, err error) { + req, err := lastResults.imageListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.ImagesClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.ImagesClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ImagesClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client ImagesClient) ListComplete(ctx context.Context) (result ImageListResultIterator, err error) { + result.page, err = client.List(ctx) + return +} + +// ListByResourceGroup gets the list of images under a resource group. +// +// resourceGroupName is the name of the resource group. 
+func (client ImagesClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result ImageListResultPage, err error) { + result.fn = client.listByResourceGroupNextResults + req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ImagesClient", "ListByResourceGroup", nil, "Failure preparing request") + return + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.ilr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.ImagesClient", "ListByResourceGroup", resp, "Failure sending request") + return + } + + result.ilr, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ImagesClient", "ListByResourceGroup", resp, "Failure responding to request") + } + + return +} + +// ListByResourceGroupPreparer prepares the ListByResourceGroup request. +func (client ImagesClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the +// http.Response Body if it receives an error. +func (client ImagesClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always +// closes the http.Response Body. +func (client ImagesClient) ListByResourceGroupResponder(resp *http.Response) (result ImageListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listByResourceGroupNextResults retrieves the next set of results, if any. 
+func (client ImagesClient) listByResourceGroupNextResults(lastResults ImageListResult) (result ImageListResult, err error) { + req, err := lastResults.imageListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.ImagesClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.ImagesClient", "listByResourceGroupNextResults", resp, "Failure sending next results request") + } + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ImagesClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required. +func (client ImagesClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string) (result ImageListResultIterator, err error) { + result.page, err = client.ListByResourceGroup(ctx, resourceGroupName) + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/loganalytics.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/loganalytics.go new file mode 100644 index 000000000000..f6cb3bd1ab66 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/loganalytics.go @@ -0,0 +1,195 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "net/http" +) + +// LogAnalyticsClient is the compute Client +type LogAnalyticsClient struct { + BaseClient +} + +// NewLogAnalyticsClient creates an instance of the LogAnalyticsClient client. +func NewLogAnalyticsClient(subscriptionID string) LogAnalyticsClient { + return NewLogAnalyticsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewLogAnalyticsClientWithBaseURI creates an instance of the LogAnalyticsClient client. +func NewLogAnalyticsClientWithBaseURI(baseURI string, subscriptionID string) LogAnalyticsClient { + return LogAnalyticsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// ExportRequestRateByInterval export logs that show Api requests made by this subscription in the given time window to +// show throttling activities. +// +// parameters is parameters supplied to the LogAnalytics getRequestRateByInterval Api. location is the location upon +// which virtual-machine-sizes is queried. 
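+//
+// Illustrative sketch (field names follow this package's models.go and are best read as
+// assumptions): the input carries a writable blob-container SAS and a time window, and
+// location must match the pattern validated below; date.Time wraps the stdlib time.Time.
+//
+//	client := NewLogAnalyticsClient("<subscription-id>")
+//	client.Authorizer = authorizer // placeholder
+//	future, err := client.ExportRequestRateByInterval(ctx, RequestRateByIntervalInput{
+//		BlobContainerSasURI: to.StringPtr("https://<account>.blob.core.windows.net/logs?<sas>"),
+//		FromTime:            &date.Time{Time: time.Now().Add(-24 * time.Hour)},
+//		ToTime:              &date.Time{Time: time.Now()},
+//		IntervalLength:      FiveMins,
+//	}, "westus")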
+func (client LogAnalyticsClient) ExportRequestRateByInterval(ctx context.Context, parameters RequestRateByIntervalInput, location string) (result LogAnalyticsExportRequestRateByIntervalFuture, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: location, + Constraints: []validation.Constraint{{Target: "location", Name: validation.Pattern, Rule: `^[-\w\._]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "compute.LogAnalyticsClient", "ExportRequestRateByInterval") + } + + req, err := client.ExportRequestRateByIntervalPreparer(ctx, parameters, location) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.LogAnalyticsClient", "ExportRequestRateByInterval", nil, "Failure preparing request") + return + } + + result, err = client.ExportRequestRateByIntervalSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.LogAnalyticsClient", "ExportRequestRateByInterval", result.Response(), "Failure sending request") + return + } + + return +} + +// ExportRequestRateByIntervalPreparer prepares the ExportRequestRateByInterval request. +func (client LogAnalyticsClient) ExportRequestRateByIntervalPreparer(ctx context.Context, parameters RequestRateByIntervalInput, location string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": autorest.Encode("path", location), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/logAnalytics/apiAccess/getRequestRateByInterval", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ExportRequestRateByIntervalSender sends the ExportRequestRateByInterval request. The method will close the +// http.Response Body if it receives an error. +func (client LogAnalyticsClient) ExportRequestRateByIntervalSender(req *http.Request) (future LogAnalyticsExportRequestRateByIntervalFuture, err error) { + sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client)) + future.Future = azure.NewFuture(req) + future.req = req + _, err = future.Done(sender) + if err != nil { + return + } + err = autorest.Respond(future.Response(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) + return +} + +// ExportRequestRateByIntervalResponder handles the response to the ExportRequestRateByInterval request. The method always +// closes the http.Response Body. +func (client LogAnalyticsClient) ExportRequestRateByIntervalResponder(resp *http.Response) (result LogAnalyticsOperationResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ExportThrottledRequests export logs that show total throttled Api requests for this subscription in the given time +// window. +// +// parameters is parameters supplied to the LogAnalytics getThrottledRequests Api. 
location is the location upon which +// virtual-machine-sizes is queried. +func (client LogAnalyticsClient) ExportThrottledRequests(ctx context.Context, parameters ThrottledRequestsInput, location string) (result LogAnalyticsExportThrottledRequestsFuture, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: location, + Constraints: []validation.Constraint{{Target: "location", Name: validation.Pattern, Rule: `^[-\w\._]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "compute.LogAnalyticsClient", "ExportThrottledRequests") + } + + req, err := client.ExportThrottledRequestsPreparer(ctx, parameters, location) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.LogAnalyticsClient", "ExportThrottledRequests", nil, "Failure preparing request") + return + } + + result, err = client.ExportThrottledRequestsSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.LogAnalyticsClient", "ExportThrottledRequests", result.Response(), "Failure sending request") + return + } + + return +} + +// ExportThrottledRequestsPreparer prepares the ExportThrottledRequests request. +func (client LogAnalyticsClient) ExportThrottledRequestsPreparer(ctx context.Context, parameters ThrottledRequestsInput, location string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": autorest.Encode("path", location), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/logAnalytics/apiAccess/getThrottledRequests", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ExportThrottledRequestsSender sends the ExportThrottledRequests request. The method will close the +// http.Response Body if it receives an error. +func (client LogAnalyticsClient) ExportThrottledRequestsSender(req *http.Request) (future LogAnalyticsExportThrottledRequestsFuture, err error) { + sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client)) + future.Future = azure.NewFuture(req) + future.req = req + _, err = future.Done(sender) + if err != nil { + return + } + err = autorest.Respond(future.Response(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) + return +} + +// ExportThrottledRequestsResponder handles the response to the ExportThrottledRequests request. The method always +// closes the http.Response Body. 
+func (client LogAnalyticsClient) ExportThrottledRequestsResponder(resp *http.Response) (result LogAnalyticsOperationResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/models.go new file mode 100644 index 000000000000..02848ce546bd --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/models.go @@ -0,0 +1,5809 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/date" + "github.com/Azure/go-autorest/autorest/to" + "net/http" +) + +// AccessLevel enumerates the values for access level. +type AccessLevel string + +const ( + // None ... + None AccessLevel = "None" + // Read ... + Read AccessLevel = "Read" +) + +// CachingTypes enumerates the values for caching types. +type CachingTypes string + +const ( + // CachingTypesNone ... + CachingTypesNone CachingTypes = "None" + // CachingTypesReadOnly ... + CachingTypesReadOnly CachingTypes = "ReadOnly" + // CachingTypesReadWrite ... + CachingTypesReadWrite CachingTypes = "ReadWrite" +) + +// ComponentNames enumerates the values for component names. +type ComponentNames string + +const ( + // MicrosoftWindowsShellSetup ... + MicrosoftWindowsShellSetup ComponentNames = "Microsoft-Windows-Shell-Setup" +) + +// ContainerServiceOrchestratorTypes enumerates the values for container service orchestrator types. +type ContainerServiceOrchestratorTypes string + +const ( + // Custom ... + Custom ContainerServiceOrchestratorTypes = "Custom" + // DCOS ... + DCOS ContainerServiceOrchestratorTypes = "DCOS" + // Kubernetes ... + Kubernetes ContainerServiceOrchestratorTypes = "Kubernetes" + // Swarm ... + Swarm ContainerServiceOrchestratorTypes = "Swarm" +) + +// ContainerServiceVMSizeTypes enumerates the values for container service vm size types. +type ContainerServiceVMSizeTypes string + +const ( + // StandardA0 ... + StandardA0 ContainerServiceVMSizeTypes = "Standard_A0" + // StandardA1 ... + StandardA1 ContainerServiceVMSizeTypes = "Standard_A1" + // StandardA10 ... + StandardA10 ContainerServiceVMSizeTypes = "Standard_A10" + // StandardA11 ... + StandardA11 ContainerServiceVMSizeTypes = "Standard_A11" + // StandardA2 ... + StandardA2 ContainerServiceVMSizeTypes = "Standard_A2" + // StandardA3 ... 
+ StandardA3 ContainerServiceVMSizeTypes = "Standard_A3" + // StandardA4 ... + StandardA4 ContainerServiceVMSizeTypes = "Standard_A4" + // StandardA5 ... + StandardA5 ContainerServiceVMSizeTypes = "Standard_A5" + // StandardA6 ... + StandardA6 ContainerServiceVMSizeTypes = "Standard_A6" + // StandardA7 ... + StandardA7 ContainerServiceVMSizeTypes = "Standard_A7" + // StandardA8 ... + StandardA8 ContainerServiceVMSizeTypes = "Standard_A8" + // StandardA9 ... + StandardA9 ContainerServiceVMSizeTypes = "Standard_A9" + // StandardD1 ... + StandardD1 ContainerServiceVMSizeTypes = "Standard_D1" + // StandardD11 ... + StandardD11 ContainerServiceVMSizeTypes = "Standard_D11" + // StandardD11V2 ... + StandardD11V2 ContainerServiceVMSizeTypes = "Standard_D11_v2" + // StandardD12 ... + StandardD12 ContainerServiceVMSizeTypes = "Standard_D12" + // StandardD12V2 ... + StandardD12V2 ContainerServiceVMSizeTypes = "Standard_D12_v2" + // StandardD13 ... + StandardD13 ContainerServiceVMSizeTypes = "Standard_D13" + // StandardD13V2 ... + StandardD13V2 ContainerServiceVMSizeTypes = "Standard_D13_v2" + // StandardD14 ... + StandardD14 ContainerServiceVMSizeTypes = "Standard_D14" + // StandardD14V2 ... + StandardD14V2 ContainerServiceVMSizeTypes = "Standard_D14_v2" + // StandardD1V2 ... + StandardD1V2 ContainerServiceVMSizeTypes = "Standard_D1_v2" + // StandardD2 ... + StandardD2 ContainerServiceVMSizeTypes = "Standard_D2" + // StandardD2V2 ... + StandardD2V2 ContainerServiceVMSizeTypes = "Standard_D2_v2" + // StandardD3 ... + StandardD3 ContainerServiceVMSizeTypes = "Standard_D3" + // StandardD3V2 ... + StandardD3V2 ContainerServiceVMSizeTypes = "Standard_D3_v2" + // StandardD4 ... + StandardD4 ContainerServiceVMSizeTypes = "Standard_D4" + // StandardD4V2 ... + StandardD4V2 ContainerServiceVMSizeTypes = "Standard_D4_v2" + // StandardD5V2 ... + StandardD5V2 ContainerServiceVMSizeTypes = "Standard_D5_v2" + // StandardDS1 ... + StandardDS1 ContainerServiceVMSizeTypes = "Standard_DS1" + // StandardDS11 ... + StandardDS11 ContainerServiceVMSizeTypes = "Standard_DS11" + // StandardDS12 ... + StandardDS12 ContainerServiceVMSizeTypes = "Standard_DS12" + // StandardDS13 ... + StandardDS13 ContainerServiceVMSizeTypes = "Standard_DS13" + // StandardDS14 ... + StandardDS14 ContainerServiceVMSizeTypes = "Standard_DS14" + // StandardDS2 ... + StandardDS2 ContainerServiceVMSizeTypes = "Standard_DS2" + // StandardDS3 ... + StandardDS3 ContainerServiceVMSizeTypes = "Standard_DS3" + // StandardDS4 ... + StandardDS4 ContainerServiceVMSizeTypes = "Standard_DS4" + // StandardG1 ... + StandardG1 ContainerServiceVMSizeTypes = "Standard_G1" + // StandardG2 ... + StandardG2 ContainerServiceVMSizeTypes = "Standard_G2" + // StandardG3 ... + StandardG3 ContainerServiceVMSizeTypes = "Standard_G3" + // StandardG4 ... + StandardG4 ContainerServiceVMSizeTypes = "Standard_G4" + // StandardG5 ... + StandardG5 ContainerServiceVMSizeTypes = "Standard_G5" + // StandardGS1 ... + StandardGS1 ContainerServiceVMSizeTypes = "Standard_GS1" + // StandardGS2 ... + StandardGS2 ContainerServiceVMSizeTypes = "Standard_GS2" + // StandardGS3 ... + StandardGS3 ContainerServiceVMSizeTypes = "Standard_GS3" + // StandardGS4 ... + StandardGS4 ContainerServiceVMSizeTypes = "Standard_GS4" + // StandardGS5 ... + StandardGS5 ContainerServiceVMSizeTypes = "Standard_GS5" +) + +// DiskCreateOption enumerates the values for disk create option. +type DiskCreateOption string + +const ( + // Attach ... + Attach DiskCreateOption = "Attach" + // Copy ... 
+ Copy DiskCreateOption = "Copy" + // Empty ... + Empty DiskCreateOption = "Empty" + // FromImage ... + FromImage DiskCreateOption = "FromImage" + // Import ... + Import DiskCreateOption = "Import" +) + +// DiskCreateOptionTypes enumerates the values for disk create option types. +type DiskCreateOptionTypes string + +const ( + // DiskCreateOptionTypesAttach ... + DiskCreateOptionTypesAttach DiskCreateOptionTypes = "Attach" + // DiskCreateOptionTypesEmpty ... + DiskCreateOptionTypesEmpty DiskCreateOptionTypes = "Empty" + // DiskCreateOptionTypesFromImage ... + DiskCreateOptionTypesFromImage DiskCreateOptionTypes = "FromImage" +) + +// InstanceViewTypes enumerates the values for instance view types. +type InstanceViewTypes string + +const ( + // InstanceView ... + InstanceView InstanceViewTypes = "instanceView" +) + +// IntervalInMins enumerates the values for interval in mins. +type IntervalInMins string + +const ( + // FiveMins ... + FiveMins IntervalInMins = "FiveMins" + // SixtyMins ... + SixtyMins IntervalInMins = "SixtyMins" + // ThirtyMins ... + ThirtyMins IntervalInMins = "ThirtyMins" + // ThreeMins ... + ThreeMins IntervalInMins = "ThreeMins" +) + +// IPVersion enumerates the values for ip version. +type IPVersion string + +const ( + // IPv4 ... + IPv4 IPVersion = "IPv4" + // IPv6 ... + IPv6 IPVersion = "IPv6" +) + +// MaintenanceOperationResultCodeTypes enumerates the values for maintenance operation result code types. +type MaintenanceOperationResultCodeTypes string + +const ( + // MaintenanceOperationResultCodeTypesMaintenanceAborted ... + MaintenanceOperationResultCodeTypesMaintenanceAborted MaintenanceOperationResultCodeTypes = "MaintenanceAborted" + // MaintenanceOperationResultCodeTypesMaintenanceCompleted ... + MaintenanceOperationResultCodeTypesMaintenanceCompleted MaintenanceOperationResultCodeTypes = "MaintenanceCompleted" + // MaintenanceOperationResultCodeTypesNone ... + MaintenanceOperationResultCodeTypesNone MaintenanceOperationResultCodeTypes = "None" + // MaintenanceOperationResultCodeTypesRetryLater ... + MaintenanceOperationResultCodeTypesRetryLater MaintenanceOperationResultCodeTypes = "RetryLater" +) + +// OperatingSystemStateTypes enumerates the values for operating system state types. +type OperatingSystemStateTypes string + +const ( + // Generalized ... + Generalized OperatingSystemStateTypes = "Generalized" + // Specialized ... + Specialized OperatingSystemStateTypes = "Specialized" +) + +// OperatingSystemTypes enumerates the values for operating system types. +type OperatingSystemTypes string + +const ( + // Linux ... + Linux OperatingSystemTypes = "Linux" + // Windows ... + Windows OperatingSystemTypes = "Windows" +) + +// PassNames enumerates the values for pass names. +type PassNames string + +const ( + // OobeSystem ... + OobeSystem PassNames = "OobeSystem" +) + +// ProtocolTypes enumerates the values for protocol types. +type ProtocolTypes string + +const ( + // HTTP ... + HTTP ProtocolTypes = "Http" + // HTTPS ... + HTTPS ProtocolTypes = "Https" +) + +// ResourceIdentityType enumerates the values for resource identity type. +type ResourceIdentityType string + +const ( + // ResourceIdentityTypeNone ... + ResourceIdentityTypeNone ResourceIdentityType = "None" + // ResourceIdentityTypeSystemAssigned ... + ResourceIdentityTypeSystemAssigned ResourceIdentityType = "SystemAssigned" + // ResourceIdentityTypeSystemAssignedUserAssigned ... 
+ ResourceIdentityTypeSystemAssignedUserAssigned ResourceIdentityType = "SystemAssigned, UserAssigned" + // ResourceIdentityTypeUserAssigned ... + ResourceIdentityTypeUserAssigned ResourceIdentityType = "UserAssigned" +) + +// ResourceSkuCapacityScaleType enumerates the values for resource sku capacity scale type. +type ResourceSkuCapacityScaleType string + +const ( + // ResourceSkuCapacityScaleTypeAutomatic ... + ResourceSkuCapacityScaleTypeAutomatic ResourceSkuCapacityScaleType = "Automatic" + // ResourceSkuCapacityScaleTypeManual ... + ResourceSkuCapacityScaleTypeManual ResourceSkuCapacityScaleType = "Manual" + // ResourceSkuCapacityScaleTypeNone ... + ResourceSkuCapacityScaleTypeNone ResourceSkuCapacityScaleType = "None" +) + +// ResourceSkuRestrictionsReasonCode enumerates the values for resource sku restrictions reason code. +type ResourceSkuRestrictionsReasonCode string + +const ( + // NotAvailableForSubscription ... + NotAvailableForSubscription ResourceSkuRestrictionsReasonCode = "NotAvailableForSubscription" + // QuotaID ... + QuotaID ResourceSkuRestrictionsReasonCode = "QuotaId" +) + +// ResourceSkuRestrictionsType enumerates the values for resource sku restrictions type. +type ResourceSkuRestrictionsType string + +const ( + // Location ... + Location ResourceSkuRestrictionsType = "Location" + // Zone ... + Zone ResourceSkuRestrictionsType = "Zone" +) + +// RollingUpgradeActionType enumerates the values for rolling upgrade action type. +type RollingUpgradeActionType string + +const ( + // Cancel ... + Cancel RollingUpgradeActionType = "Cancel" + // Start ... + Start RollingUpgradeActionType = "Start" +) + +// RollingUpgradeStatusCode enumerates the values for rolling upgrade status code. +type RollingUpgradeStatusCode string + +const ( + // Cancelled ... + Cancelled RollingUpgradeStatusCode = "Cancelled" + // Completed ... + Completed RollingUpgradeStatusCode = "Completed" + // Faulted ... + Faulted RollingUpgradeStatusCode = "Faulted" + // RollingForward ... + RollingForward RollingUpgradeStatusCode = "RollingForward" +) + +// SettingNames enumerates the values for setting names. +type SettingNames string + +const ( + // AutoLogon ... + AutoLogon SettingNames = "AutoLogon" + // FirstLogonCommands ... + FirstLogonCommands SettingNames = "FirstLogonCommands" +) + +// StatusLevelTypes enumerates the values for status level types. +type StatusLevelTypes string + +const ( + // Error ... + Error StatusLevelTypes = "Error" + // Info ... + Info StatusLevelTypes = "Info" + // Warning ... + Warning StatusLevelTypes = "Warning" +) + +// StorageAccountTypes enumerates the values for storage account types. +type StorageAccountTypes string + +const ( + // PremiumLRS ... + PremiumLRS StorageAccountTypes = "Premium_LRS" + // StandardLRS ... + StandardLRS StorageAccountTypes = "Standard_LRS" +) + +// UpgradeMode enumerates the values for upgrade mode. +type UpgradeMode string + +const ( + // Automatic ... + Automatic UpgradeMode = "Automatic" + // Manual ... + Manual UpgradeMode = "Manual" + // Rolling ... + Rolling UpgradeMode = "Rolling" +) + +// VirtualMachinePriorityTypes enumerates the values for virtual machine priority types. +type VirtualMachinePriorityTypes string + +const ( + // Low ... + Low VirtualMachinePriorityTypes = "Low" + // Regular ... + Regular VirtualMachinePriorityTypes = "Regular" +) + +// VirtualMachineScaleSetSkuScaleType enumerates the values for virtual machine scale set sku scale type. 
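+//
+// Illustrative aside on the enums above: they are consumed as plain typed strings inside
+// request payloads; e.g., a managed-disk create payload (struct shapes per this
+// models.go, treated here as assumptions) combines StorageAccountTypes with a
+// DiskCreateOption:
+//
+//	disk := Disk{
+//		Location: to.StringPtr("westus"),
+//		Sku:      &DiskSku{Name: PremiumLRS},
+//		DiskProperties: &DiskProperties{
+//			CreationData: &CreationData{CreateOption: Empty},
+//			DiskSizeGB:   to.Int32Ptr(64),
+//		},
+//	}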
+type VirtualMachineScaleSetSkuScaleType string + +const ( + // VirtualMachineScaleSetSkuScaleTypeAutomatic ... + VirtualMachineScaleSetSkuScaleTypeAutomatic VirtualMachineScaleSetSkuScaleType = "Automatic" + // VirtualMachineScaleSetSkuScaleTypeNone ... + VirtualMachineScaleSetSkuScaleTypeNone VirtualMachineScaleSetSkuScaleType = "None" +) + +// VirtualMachineSizeTypes enumerates the values for virtual machine size types. +type VirtualMachineSizeTypes string + +const ( + // VirtualMachineSizeTypesBasicA0 ... + VirtualMachineSizeTypesBasicA0 VirtualMachineSizeTypes = "Basic_A0" + // VirtualMachineSizeTypesBasicA1 ... + VirtualMachineSizeTypesBasicA1 VirtualMachineSizeTypes = "Basic_A1" + // VirtualMachineSizeTypesBasicA2 ... + VirtualMachineSizeTypesBasicA2 VirtualMachineSizeTypes = "Basic_A2" + // VirtualMachineSizeTypesBasicA3 ... + VirtualMachineSizeTypesBasicA3 VirtualMachineSizeTypes = "Basic_A3" + // VirtualMachineSizeTypesBasicA4 ... + VirtualMachineSizeTypesBasicA4 VirtualMachineSizeTypes = "Basic_A4" + // VirtualMachineSizeTypesStandardA0 ... + VirtualMachineSizeTypesStandardA0 VirtualMachineSizeTypes = "Standard_A0" + // VirtualMachineSizeTypesStandardA1 ... + VirtualMachineSizeTypesStandardA1 VirtualMachineSizeTypes = "Standard_A1" + // VirtualMachineSizeTypesStandardA10 ... + VirtualMachineSizeTypesStandardA10 VirtualMachineSizeTypes = "Standard_A10" + // VirtualMachineSizeTypesStandardA11 ... + VirtualMachineSizeTypesStandardA11 VirtualMachineSizeTypes = "Standard_A11" + // VirtualMachineSizeTypesStandardA1V2 ... + VirtualMachineSizeTypesStandardA1V2 VirtualMachineSizeTypes = "Standard_A1_v2" + // VirtualMachineSizeTypesStandardA2 ... + VirtualMachineSizeTypesStandardA2 VirtualMachineSizeTypes = "Standard_A2" + // VirtualMachineSizeTypesStandardA2mV2 ... + VirtualMachineSizeTypesStandardA2mV2 VirtualMachineSizeTypes = "Standard_A2m_v2" + // VirtualMachineSizeTypesStandardA2V2 ... + VirtualMachineSizeTypesStandardA2V2 VirtualMachineSizeTypes = "Standard_A2_v2" + // VirtualMachineSizeTypesStandardA3 ... + VirtualMachineSizeTypesStandardA3 VirtualMachineSizeTypes = "Standard_A3" + // VirtualMachineSizeTypesStandardA4 ... + VirtualMachineSizeTypesStandardA4 VirtualMachineSizeTypes = "Standard_A4" + // VirtualMachineSizeTypesStandardA4mV2 ... + VirtualMachineSizeTypesStandardA4mV2 VirtualMachineSizeTypes = "Standard_A4m_v2" + // VirtualMachineSizeTypesStandardA4V2 ... + VirtualMachineSizeTypesStandardA4V2 VirtualMachineSizeTypes = "Standard_A4_v2" + // VirtualMachineSizeTypesStandardA5 ... + VirtualMachineSizeTypesStandardA5 VirtualMachineSizeTypes = "Standard_A5" + // VirtualMachineSizeTypesStandardA6 ... + VirtualMachineSizeTypesStandardA6 VirtualMachineSizeTypes = "Standard_A6" + // VirtualMachineSizeTypesStandardA7 ... + VirtualMachineSizeTypesStandardA7 VirtualMachineSizeTypes = "Standard_A7" + // VirtualMachineSizeTypesStandardA8 ... + VirtualMachineSizeTypesStandardA8 VirtualMachineSizeTypes = "Standard_A8" + // VirtualMachineSizeTypesStandardA8mV2 ... + VirtualMachineSizeTypesStandardA8mV2 VirtualMachineSizeTypes = "Standard_A8m_v2" + // VirtualMachineSizeTypesStandardA8V2 ... + VirtualMachineSizeTypesStandardA8V2 VirtualMachineSizeTypes = "Standard_A8_v2" + // VirtualMachineSizeTypesStandardA9 ... + VirtualMachineSizeTypesStandardA9 VirtualMachineSizeTypes = "Standard_A9" + // VirtualMachineSizeTypesStandardB1ms ... + VirtualMachineSizeTypesStandardB1ms VirtualMachineSizeTypes = "Standard_B1ms" + // VirtualMachineSizeTypesStandardB1s ... 
+ VirtualMachineSizeTypesStandardB1s VirtualMachineSizeTypes = "Standard_B1s" + // VirtualMachineSizeTypesStandardB2ms ... + VirtualMachineSizeTypesStandardB2ms VirtualMachineSizeTypes = "Standard_B2ms" + // VirtualMachineSizeTypesStandardB2s ... + VirtualMachineSizeTypesStandardB2s VirtualMachineSizeTypes = "Standard_B2s" + // VirtualMachineSizeTypesStandardB4ms ... + VirtualMachineSizeTypesStandardB4ms VirtualMachineSizeTypes = "Standard_B4ms" + // VirtualMachineSizeTypesStandardB8ms ... + VirtualMachineSizeTypesStandardB8ms VirtualMachineSizeTypes = "Standard_B8ms" + // VirtualMachineSizeTypesStandardD1 ... + VirtualMachineSizeTypesStandardD1 VirtualMachineSizeTypes = "Standard_D1" + // VirtualMachineSizeTypesStandardD11 ... + VirtualMachineSizeTypesStandardD11 VirtualMachineSizeTypes = "Standard_D11" + // VirtualMachineSizeTypesStandardD11V2 ... + VirtualMachineSizeTypesStandardD11V2 VirtualMachineSizeTypes = "Standard_D11_v2" + // VirtualMachineSizeTypesStandardD12 ... + VirtualMachineSizeTypesStandardD12 VirtualMachineSizeTypes = "Standard_D12" + // VirtualMachineSizeTypesStandardD12V2 ... + VirtualMachineSizeTypesStandardD12V2 VirtualMachineSizeTypes = "Standard_D12_v2" + // VirtualMachineSizeTypesStandardD13 ... + VirtualMachineSizeTypesStandardD13 VirtualMachineSizeTypes = "Standard_D13" + // VirtualMachineSizeTypesStandardD13V2 ... + VirtualMachineSizeTypesStandardD13V2 VirtualMachineSizeTypes = "Standard_D13_v2" + // VirtualMachineSizeTypesStandardD14 ... + VirtualMachineSizeTypesStandardD14 VirtualMachineSizeTypes = "Standard_D14" + // VirtualMachineSizeTypesStandardD14V2 ... + VirtualMachineSizeTypesStandardD14V2 VirtualMachineSizeTypes = "Standard_D14_v2" + // VirtualMachineSizeTypesStandardD15V2 ... + VirtualMachineSizeTypesStandardD15V2 VirtualMachineSizeTypes = "Standard_D15_v2" + // VirtualMachineSizeTypesStandardD16sV3 ... + VirtualMachineSizeTypesStandardD16sV3 VirtualMachineSizeTypes = "Standard_D16s_v3" + // VirtualMachineSizeTypesStandardD16V3 ... + VirtualMachineSizeTypesStandardD16V3 VirtualMachineSizeTypes = "Standard_D16_v3" + // VirtualMachineSizeTypesStandardD1V2 ... + VirtualMachineSizeTypesStandardD1V2 VirtualMachineSizeTypes = "Standard_D1_v2" + // VirtualMachineSizeTypesStandardD2 ... + VirtualMachineSizeTypesStandardD2 VirtualMachineSizeTypes = "Standard_D2" + // VirtualMachineSizeTypesStandardD2sV3 ... + VirtualMachineSizeTypesStandardD2sV3 VirtualMachineSizeTypes = "Standard_D2s_v3" + // VirtualMachineSizeTypesStandardD2V2 ... + VirtualMachineSizeTypesStandardD2V2 VirtualMachineSizeTypes = "Standard_D2_v2" + // VirtualMachineSizeTypesStandardD2V3 ... + VirtualMachineSizeTypesStandardD2V3 VirtualMachineSizeTypes = "Standard_D2_v3" + // VirtualMachineSizeTypesStandardD3 ... + VirtualMachineSizeTypesStandardD3 VirtualMachineSizeTypes = "Standard_D3" + // VirtualMachineSizeTypesStandardD32sV3 ... + VirtualMachineSizeTypesStandardD32sV3 VirtualMachineSizeTypes = "Standard_D32s_v3" + // VirtualMachineSizeTypesStandardD32V3 ... + VirtualMachineSizeTypesStandardD32V3 VirtualMachineSizeTypes = "Standard_D32_v3" + // VirtualMachineSizeTypesStandardD3V2 ... + VirtualMachineSizeTypesStandardD3V2 VirtualMachineSizeTypes = "Standard_D3_v2" + // VirtualMachineSizeTypesStandardD4 ... + VirtualMachineSizeTypesStandardD4 VirtualMachineSizeTypes = "Standard_D4" + // VirtualMachineSizeTypesStandardD4sV3 ... + VirtualMachineSizeTypesStandardD4sV3 VirtualMachineSizeTypes = "Standard_D4s_v3" + // VirtualMachineSizeTypesStandardD4V2 ... 
+ VirtualMachineSizeTypesStandardD4V2 VirtualMachineSizeTypes = "Standard_D4_v2" + // VirtualMachineSizeTypesStandardD4V3 ... + VirtualMachineSizeTypesStandardD4V3 VirtualMachineSizeTypes = "Standard_D4_v3" + // VirtualMachineSizeTypesStandardD5V2 ... + VirtualMachineSizeTypesStandardD5V2 VirtualMachineSizeTypes = "Standard_D5_v2" + // VirtualMachineSizeTypesStandardD64sV3 ... + VirtualMachineSizeTypesStandardD64sV3 VirtualMachineSizeTypes = "Standard_D64s_v3" + // VirtualMachineSizeTypesStandardD64V3 ... + VirtualMachineSizeTypesStandardD64V3 VirtualMachineSizeTypes = "Standard_D64_v3" + // VirtualMachineSizeTypesStandardD8sV3 ... + VirtualMachineSizeTypesStandardD8sV3 VirtualMachineSizeTypes = "Standard_D8s_v3" + // VirtualMachineSizeTypesStandardD8V3 ... + VirtualMachineSizeTypesStandardD8V3 VirtualMachineSizeTypes = "Standard_D8_v3" + // VirtualMachineSizeTypesStandardDS1 ... + VirtualMachineSizeTypesStandardDS1 VirtualMachineSizeTypes = "Standard_DS1" + // VirtualMachineSizeTypesStandardDS11 ... + VirtualMachineSizeTypesStandardDS11 VirtualMachineSizeTypes = "Standard_DS11" + // VirtualMachineSizeTypesStandardDS11V2 ... + VirtualMachineSizeTypesStandardDS11V2 VirtualMachineSizeTypes = "Standard_DS11_v2" + // VirtualMachineSizeTypesStandardDS12 ... + VirtualMachineSizeTypesStandardDS12 VirtualMachineSizeTypes = "Standard_DS12" + // VirtualMachineSizeTypesStandardDS12V2 ... + VirtualMachineSizeTypesStandardDS12V2 VirtualMachineSizeTypes = "Standard_DS12_v2" + // VirtualMachineSizeTypesStandardDS13 ... + VirtualMachineSizeTypesStandardDS13 VirtualMachineSizeTypes = "Standard_DS13" + // VirtualMachineSizeTypesStandardDS132V2 ... + VirtualMachineSizeTypesStandardDS132V2 VirtualMachineSizeTypes = "Standard_DS13-2_v2" + // VirtualMachineSizeTypesStandardDS134V2 ... + VirtualMachineSizeTypesStandardDS134V2 VirtualMachineSizeTypes = "Standard_DS13-4_v2" + // VirtualMachineSizeTypesStandardDS13V2 ... + VirtualMachineSizeTypesStandardDS13V2 VirtualMachineSizeTypes = "Standard_DS13_v2" + // VirtualMachineSizeTypesStandardDS14 ... + VirtualMachineSizeTypesStandardDS14 VirtualMachineSizeTypes = "Standard_DS14" + // VirtualMachineSizeTypesStandardDS144V2 ... + VirtualMachineSizeTypesStandardDS144V2 VirtualMachineSizeTypes = "Standard_DS14-4_v2" + // VirtualMachineSizeTypesStandardDS148V2 ... + VirtualMachineSizeTypesStandardDS148V2 VirtualMachineSizeTypes = "Standard_DS14-8_v2" + // VirtualMachineSizeTypesStandardDS14V2 ... + VirtualMachineSizeTypesStandardDS14V2 VirtualMachineSizeTypes = "Standard_DS14_v2" + // VirtualMachineSizeTypesStandardDS15V2 ... + VirtualMachineSizeTypesStandardDS15V2 VirtualMachineSizeTypes = "Standard_DS15_v2" + // VirtualMachineSizeTypesStandardDS1V2 ... + VirtualMachineSizeTypesStandardDS1V2 VirtualMachineSizeTypes = "Standard_DS1_v2" + // VirtualMachineSizeTypesStandardDS2 ... + VirtualMachineSizeTypesStandardDS2 VirtualMachineSizeTypes = "Standard_DS2" + // VirtualMachineSizeTypesStandardDS2V2 ... + VirtualMachineSizeTypesStandardDS2V2 VirtualMachineSizeTypes = "Standard_DS2_v2" + // VirtualMachineSizeTypesStandardDS3 ... + VirtualMachineSizeTypesStandardDS3 VirtualMachineSizeTypes = "Standard_DS3" + // VirtualMachineSizeTypesStandardDS3V2 ... + VirtualMachineSizeTypesStandardDS3V2 VirtualMachineSizeTypes = "Standard_DS3_v2" + // VirtualMachineSizeTypesStandardDS4 ... + VirtualMachineSizeTypesStandardDS4 VirtualMachineSizeTypes = "Standard_DS4" + // VirtualMachineSizeTypesStandardDS4V2 ... 
+ VirtualMachineSizeTypesStandardDS4V2 VirtualMachineSizeTypes = "Standard_DS4_v2" + // VirtualMachineSizeTypesStandardDS5V2 ... + VirtualMachineSizeTypesStandardDS5V2 VirtualMachineSizeTypes = "Standard_DS5_v2" + // VirtualMachineSizeTypesStandardE16sV3 ... + VirtualMachineSizeTypesStandardE16sV3 VirtualMachineSizeTypes = "Standard_E16s_v3" + // VirtualMachineSizeTypesStandardE16V3 ... + VirtualMachineSizeTypesStandardE16V3 VirtualMachineSizeTypes = "Standard_E16_v3" + // VirtualMachineSizeTypesStandardE2sV3 ... + VirtualMachineSizeTypesStandardE2sV3 VirtualMachineSizeTypes = "Standard_E2s_v3" + // VirtualMachineSizeTypesStandardE2V3 ... + VirtualMachineSizeTypesStandardE2V3 VirtualMachineSizeTypes = "Standard_E2_v3" + // VirtualMachineSizeTypesStandardE3216V3 ... + VirtualMachineSizeTypesStandardE3216V3 VirtualMachineSizeTypes = "Standard_E32-16_v3" + // VirtualMachineSizeTypesStandardE328sV3 ... + VirtualMachineSizeTypesStandardE328sV3 VirtualMachineSizeTypes = "Standard_E32-8s_v3" + // VirtualMachineSizeTypesStandardE32sV3 ... + VirtualMachineSizeTypesStandardE32sV3 VirtualMachineSizeTypes = "Standard_E32s_v3" + // VirtualMachineSizeTypesStandardE32V3 ... + VirtualMachineSizeTypesStandardE32V3 VirtualMachineSizeTypes = "Standard_E32_v3" + // VirtualMachineSizeTypesStandardE4sV3 ... + VirtualMachineSizeTypesStandardE4sV3 VirtualMachineSizeTypes = "Standard_E4s_v3" + // VirtualMachineSizeTypesStandardE4V3 ... + VirtualMachineSizeTypesStandardE4V3 VirtualMachineSizeTypes = "Standard_E4_v3" + // VirtualMachineSizeTypesStandardE6416sV3 ... + VirtualMachineSizeTypesStandardE6416sV3 VirtualMachineSizeTypes = "Standard_E64-16s_v3" + // VirtualMachineSizeTypesStandardE6432sV3 ... + VirtualMachineSizeTypesStandardE6432sV3 VirtualMachineSizeTypes = "Standard_E64-32s_v3" + // VirtualMachineSizeTypesStandardE64sV3 ... + VirtualMachineSizeTypesStandardE64sV3 VirtualMachineSizeTypes = "Standard_E64s_v3" + // VirtualMachineSizeTypesStandardE64V3 ... + VirtualMachineSizeTypesStandardE64V3 VirtualMachineSizeTypes = "Standard_E64_v3" + // VirtualMachineSizeTypesStandardE8sV3 ... + VirtualMachineSizeTypesStandardE8sV3 VirtualMachineSizeTypes = "Standard_E8s_v3" + // VirtualMachineSizeTypesStandardE8V3 ... + VirtualMachineSizeTypesStandardE8V3 VirtualMachineSizeTypes = "Standard_E8_v3" + // VirtualMachineSizeTypesStandardF1 ... + VirtualMachineSizeTypesStandardF1 VirtualMachineSizeTypes = "Standard_F1" + // VirtualMachineSizeTypesStandardF16 ... + VirtualMachineSizeTypesStandardF16 VirtualMachineSizeTypes = "Standard_F16" + // VirtualMachineSizeTypesStandardF16s ... + VirtualMachineSizeTypesStandardF16s VirtualMachineSizeTypes = "Standard_F16s" + // VirtualMachineSizeTypesStandardF16sV2 ... + VirtualMachineSizeTypesStandardF16sV2 VirtualMachineSizeTypes = "Standard_F16s_v2" + // VirtualMachineSizeTypesStandardF1s ... + VirtualMachineSizeTypesStandardF1s VirtualMachineSizeTypes = "Standard_F1s" + // VirtualMachineSizeTypesStandardF2 ... + VirtualMachineSizeTypesStandardF2 VirtualMachineSizeTypes = "Standard_F2" + // VirtualMachineSizeTypesStandardF2s ... + VirtualMachineSizeTypesStandardF2s VirtualMachineSizeTypes = "Standard_F2s" + // VirtualMachineSizeTypesStandardF2sV2 ... + VirtualMachineSizeTypesStandardF2sV2 VirtualMachineSizeTypes = "Standard_F2s_v2" + // VirtualMachineSizeTypesStandardF32sV2 ... + VirtualMachineSizeTypesStandardF32sV2 VirtualMachineSizeTypes = "Standard_F32s_v2" + // VirtualMachineSizeTypesStandardF4 ... 
+ VirtualMachineSizeTypesStandardF4 VirtualMachineSizeTypes = "Standard_F4" + // VirtualMachineSizeTypesStandardF4s ... + VirtualMachineSizeTypesStandardF4s VirtualMachineSizeTypes = "Standard_F4s" + // VirtualMachineSizeTypesStandardF4sV2 ... + VirtualMachineSizeTypesStandardF4sV2 VirtualMachineSizeTypes = "Standard_F4s_v2" + // VirtualMachineSizeTypesStandardF64sV2 ... + VirtualMachineSizeTypesStandardF64sV2 VirtualMachineSizeTypes = "Standard_F64s_v2" + // VirtualMachineSizeTypesStandardF72sV2 ... + VirtualMachineSizeTypesStandardF72sV2 VirtualMachineSizeTypes = "Standard_F72s_v2" + // VirtualMachineSizeTypesStandardF8 ... + VirtualMachineSizeTypesStandardF8 VirtualMachineSizeTypes = "Standard_F8" + // VirtualMachineSizeTypesStandardF8s ... + VirtualMachineSizeTypesStandardF8s VirtualMachineSizeTypes = "Standard_F8s" + // VirtualMachineSizeTypesStandardF8sV2 ... + VirtualMachineSizeTypesStandardF8sV2 VirtualMachineSizeTypes = "Standard_F8s_v2" + // VirtualMachineSizeTypesStandardG1 ... + VirtualMachineSizeTypesStandardG1 VirtualMachineSizeTypes = "Standard_G1" + // VirtualMachineSizeTypesStandardG2 ... + VirtualMachineSizeTypesStandardG2 VirtualMachineSizeTypes = "Standard_G2" + // VirtualMachineSizeTypesStandardG3 ... + VirtualMachineSizeTypesStandardG3 VirtualMachineSizeTypes = "Standard_G3" + // VirtualMachineSizeTypesStandardG4 ... + VirtualMachineSizeTypesStandardG4 VirtualMachineSizeTypes = "Standard_G4" + // VirtualMachineSizeTypesStandardG5 ... + VirtualMachineSizeTypesStandardG5 VirtualMachineSizeTypes = "Standard_G5" + // VirtualMachineSizeTypesStandardGS1 ... + VirtualMachineSizeTypesStandardGS1 VirtualMachineSizeTypes = "Standard_GS1" + // VirtualMachineSizeTypesStandardGS2 ... + VirtualMachineSizeTypesStandardGS2 VirtualMachineSizeTypes = "Standard_GS2" + // VirtualMachineSizeTypesStandardGS3 ... + VirtualMachineSizeTypesStandardGS3 VirtualMachineSizeTypes = "Standard_GS3" + // VirtualMachineSizeTypesStandardGS4 ... + VirtualMachineSizeTypesStandardGS4 VirtualMachineSizeTypes = "Standard_GS4" + // VirtualMachineSizeTypesStandardGS44 ... + VirtualMachineSizeTypesStandardGS44 VirtualMachineSizeTypes = "Standard_GS4-4" + // VirtualMachineSizeTypesStandardGS48 ... + VirtualMachineSizeTypesStandardGS48 VirtualMachineSizeTypes = "Standard_GS4-8" + // VirtualMachineSizeTypesStandardGS5 ... + VirtualMachineSizeTypesStandardGS5 VirtualMachineSizeTypes = "Standard_GS5" + // VirtualMachineSizeTypesStandardGS516 ... + VirtualMachineSizeTypesStandardGS516 VirtualMachineSizeTypes = "Standard_GS5-16" + // VirtualMachineSizeTypesStandardGS58 ... + VirtualMachineSizeTypesStandardGS58 VirtualMachineSizeTypes = "Standard_GS5-8" + // VirtualMachineSizeTypesStandardH16 ... + VirtualMachineSizeTypesStandardH16 VirtualMachineSizeTypes = "Standard_H16" + // VirtualMachineSizeTypesStandardH16m ... + VirtualMachineSizeTypesStandardH16m VirtualMachineSizeTypes = "Standard_H16m" + // VirtualMachineSizeTypesStandardH16mr ... + VirtualMachineSizeTypesStandardH16mr VirtualMachineSizeTypes = "Standard_H16mr" + // VirtualMachineSizeTypesStandardH16r ... + VirtualMachineSizeTypesStandardH16r VirtualMachineSizeTypes = "Standard_H16r" + // VirtualMachineSizeTypesStandardH8 ... + VirtualMachineSizeTypesStandardH8 VirtualMachineSizeTypes = "Standard_H8" + // VirtualMachineSizeTypesStandardH8m ... + VirtualMachineSizeTypesStandardH8m VirtualMachineSizeTypes = "Standard_H8m" + // VirtualMachineSizeTypesStandardL16s ... 
+ VirtualMachineSizeTypesStandardL16s VirtualMachineSizeTypes = "Standard_L16s" + // VirtualMachineSizeTypesStandardL32s ... + VirtualMachineSizeTypesStandardL32s VirtualMachineSizeTypes = "Standard_L32s" + // VirtualMachineSizeTypesStandardL4s ... + VirtualMachineSizeTypesStandardL4s VirtualMachineSizeTypes = "Standard_L4s" + // VirtualMachineSizeTypesStandardL8s ... + VirtualMachineSizeTypesStandardL8s VirtualMachineSizeTypes = "Standard_L8s" + // VirtualMachineSizeTypesStandardM12832ms ... + VirtualMachineSizeTypesStandardM12832ms VirtualMachineSizeTypes = "Standard_M128-32ms" + // VirtualMachineSizeTypesStandardM12864ms ... + VirtualMachineSizeTypesStandardM12864ms VirtualMachineSizeTypes = "Standard_M128-64ms" + // VirtualMachineSizeTypesStandardM128ms ... + VirtualMachineSizeTypesStandardM128ms VirtualMachineSizeTypes = "Standard_M128ms" + // VirtualMachineSizeTypesStandardM128s ... + VirtualMachineSizeTypesStandardM128s VirtualMachineSizeTypes = "Standard_M128s" + // VirtualMachineSizeTypesStandardM6416ms ... + VirtualMachineSizeTypesStandardM6416ms VirtualMachineSizeTypes = "Standard_M64-16ms" + // VirtualMachineSizeTypesStandardM6432ms ... + VirtualMachineSizeTypesStandardM6432ms VirtualMachineSizeTypes = "Standard_M64-32ms" + // VirtualMachineSizeTypesStandardM64ms ... + VirtualMachineSizeTypesStandardM64ms VirtualMachineSizeTypes = "Standard_M64ms" + // VirtualMachineSizeTypesStandardM64s ... + VirtualMachineSizeTypesStandardM64s VirtualMachineSizeTypes = "Standard_M64s" + // VirtualMachineSizeTypesStandardNC12 ... + VirtualMachineSizeTypesStandardNC12 VirtualMachineSizeTypes = "Standard_NC12" + // VirtualMachineSizeTypesStandardNC12sV2 ... + VirtualMachineSizeTypesStandardNC12sV2 VirtualMachineSizeTypes = "Standard_NC12s_v2" + // VirtualMachineSizeTypesStandardNC12sV3 ... + VirtualMachineSizeTypesStandardNC12sV3 VirtualMachineSizeTypes = "Standard_NC12s_v3" + // VirtualMachineSizeTypesStandardNC24 ... + VirtualMachineSizeTypesStandardNC24 VirtualMachineSizeTypes = "Standard_NC24" + // VirtualMachineSizeTypesStandardNC24r ... + VirtualMachineSizeTypesStandardNC24r VirtualMachineSizeTypes = "Standard_NC24r" + // VirtualMachineSizeTypesStandardNC24rsV2 ... + VirtualMachineSizeTypesStandardNC24rsV2 VirtualMachineSizeTypes = "Standard_NC24rs_v2" + // VirtualMachineSizeTypesStandardNC24rsV3 ... + VirtualMachineSizeTypesStandardNC24rsV3 VirtualMachineSizeTypes = "Standard_NC24rs_v3" + // VirtualMachineSizeTypesStandardNC24sV2 ... + VirtualMachineSizeTypesStandardNC24sV2 VirtualMachineSizeTypes = "Standard_NC24s_v2" + // VirtualMachineSizeTypesStandardNC24sV3 ... + VirtualMachineSizeTypesStandardNC24sV3 VirtualMachineSizeTypes = "Standard_NC24s_v3" + // VirtualMachineSizeTypesStandardNC6 ... + VirtualMachineSizeTypesStandardNC6 VirtualMachineSizeTypes = "Standard_NC6" + // VirtualMachineSizeTypesStandardNC6sV2 ... + VirtualMachineSizeTypesStandardNC6sV2 VirtualMachineSizeTypes = "Standard_NC6s_v2" + // VirtualMachineSizeTypesStandardNC6sV3 ... + VirtualMachineSizeTypesStandardNC6sV3 VirtualMachineSizeTypes = "Standard_NC6s_v3" + // VirtualMachineSizeTypesStandardND12s ... + VirtualMachineSizeTypesStandardND12s VirtualMachineSizeTypes = "Standard_ND12s" + // VirtualMachineSizeTypesStandardND24rs ... + VirtualMachineSizeTypesStandardND24rs VirtualMachineSizeTypes = "Standard_ND24rs" + // VirtualMachineSizeTypesStandardND24s ... + VirtualMachineSizeTypesStandardND24s VirtualMachineSizeTypes = "Standard_ND24s" + // VirtualMachineSizeTypesStandardND6s ... 
+ VirtualMachineSizeTypesStandardND6s VirtualMachineSizeTypes = "Standard_ND6s" + // VirtualMachineSizeTypesStandardNV12 ... + VirtualMachineSizeTypesStandardNV12 VirtualMachineSizeTypes = "Standard_NV12" + // VirtualMachineSizeTypesStandardNV24 ... + VirtualMachineSizeTypesStandardNV24 VirtualMachineSizeTypes = "Standard_NV24" + // VirtualMachineSizeTypesStandardNV6 ... + VirtualMachineSizeTypesStandardNV6 VirtualMachineSizeTypes = "Standard_NV6" +) + +// AccessURI a disk access SAS uri. +type AccessURI struct { + autorest.Response `json:"-"` + // AccessURIOutput - Operation output data (raw JSON) + *AccessURIOutput `json:"properties,omitempty"` +} + +// AccessURIOutput azure properties, including output. +type AccessURIOutput struct { + // AccessURIRaw - Operation output data (raw JSON) + *AccessURIRaw `json:"output,omitempty"` +} + +// AccessURIRaw this object gets 'bubbled up' through flattening. +type AccessURIRaw struct { + // AccessSAS - A SAS uri for accessing a disk. + AccessSAS *string `json:"accessSAS,omitempty"` +} + +// AdditionalUnattendContent specifies additional XML formatted information that can be included in the Unattend.xml +// file, which is used by Windows Setup. Contents are defined by setting name, component name, and the pass in which +// the content is applied. +type AdditionalUnattendContent struct { + // PassName - The pass name. Currently, the only allowable value is OobeSystem. Possible values include: 'OobeSystem' + PassName PassNames `json:"passName,omitempty"` + // ComponentName - The component name. Currently, the only allowable value is Microsoft-Windows-Shell-Setup. Possible values include: 'MicrosoftWindowsShellSetup' + ComponentName ComponentNames `json:"componentName,omitempty"` + // SettingName - Specifies the name of the setting to which the content applies. Possible values are: FirstLogonCommands and AutoLogon. Possible values include: 'AutoLogon', 'FirstLogonCommands' + SettingName SettingNames `json:"settingName,omitempty"` + // Content - Specifies the XML formatted content that is added to the unattend.xml file for the specified path and component. The XML must be less than 4KB and must include the root element for the setting or feature that is being inserted. + Content *string `json:"content,omitempty"` +} + +// APIEntityReference the API entity reference. +type APIEntityReference struct { + // ID - The ARM resource id in the form of /subscriptions/{SubscriptionId}/resourceGroups/{ResourceGroupName}/... + ID *string `json:"id,omitempty"` +} + +// APIError api error. +type APIError struct { + // Details - The Api error details + Details *[]APIErrorBase `json:"details,omitempty"` + // Innererror - The Api inner error + Innererror *InnerError `json:"innererror,omitempty"` + // Code - The error code. + Code *string `json:"code,omitempty"` + // Target - The target of the particular error. + Target *string `json:"target,omitempty"` + // Message - The error message. + Message *string `json:"message,omitempty"` +} + +// APIErrorBase api error base. +type APIErrorBase struct { + // Code - The error code. + Code *string `json:"code,omitempty"` + // Target - The target of the particular error. + Target *string `json:"target,omitempty"` + // Message - The error message. + Message *string `json:"message,omitempty"` +} + +// AvailabilitySet specifies information about the availability set that the virtual machine should be assigned to. +// Virtual machines specified in the same availability set are allocated to different nodes to maximize availability.
+// For more information about availability sets, see [Manage the availability of virtual +// machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-manage-availability?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json). +// For more information on Azure planned maintenance, see [Planned maintenance for virtual machines in +// Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-planned-maintenance?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json) +// Currently, a VM can only be added to an availability set at creation time. An existing VM cannot be added to +// an availability set. +type AvailabilitySet struct { + autorest.Response `json:"-"` + // ID - Resource Id + ID *string `json:"id,omitempty"` + // Name - Resource name + Name *string `json:"name,omitempty"` + // Type - Resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` + // Tags - Resource tags + Tags *map[string]*string `json:"tags,omitempty"` + *AvailabilitySetProperties `json:"properties,omitempty"` + // Sku - Sku of the availability set + Sku *Sku `json:"sku,omitempty"` +} + +// AvailabilitySetListResult the List Availability Set operation response. +type AvailabilitySetListResult struct { + autorest.Response `json:"-"` + // Value - The list of availability sets + Value *[]AvailabilitySet `json:"value,omitempty"` +} + +// AvailabilitySetProperties the instance view of a resource. +type AvailabilitySetProperties struct { + // PlatformUpdateDomainCount - Update Domain count. + PlatformUpdateDomainCount *int32 `json:"platformUpdateDomainCount,omitempty"` + // PlatformFaultDomainCount - Fault Domain count. + PlatformFaultDomainCount *int32 `json:"platformFaultDomainCount,omitempty"` + // VirtualMachines - A list of references to all virtual machines in the availability set. + VirtualMachines *[]SubResource `json:"virtualMachines,omitempty"` + // Statuses - The resource status information. + Statuses *[]InstanceViewStatus `json:"statuses,omitempty"` +} + +// BootDiagnostics boot Diagnostics is a debugging feature which allows you to view Console Output and Screenshot to +// diagnose VM status. For Linux Virtual Machines, you can easily view the output of your console log. +//
For both Windows and Linux virtual machines, Azure also enables you to see a screenshot of the VM from the +// hypervisor. +type BootDiagnostics struct { + // Enabled - Whether boot diagnostics should be enabled on the Virtual Machine. + Enabled *bool `json:"enabled,omitempty"` + // StorageURI - Uri of the storage account to use for placing the console output and screenshot. + StorageURI *string `json:"storageUri,omitempty"` +} + +// BootDiagnosticsInstanceView the instance view of a virtual machine boot diagnostics. +type BootDiagnosticsInstanceView struct { + // ConsoleScreenshotBlobURI - The console screenshot blob URI. + ConsoleScreenshotBlobURI *string `json:"consoleScreenshotBlobUri,omitempty"` + // SerialConsoleLogBlobURI - The Linux serial console log blob Uri. + SerialConsoleLogBlobURI *string `json:"serialConsoleLogBlobUri,omitempty"` +} + +// ContainerService container service. +type ContainerService struct { + autorest.Response `json:"-"` + // ID - Resource Id + ID *string `json:"id,omitempty"` + // Name - Resource name + Name *string `json:"name,omitempty"` + // Type - Resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` + // Tags - Resource tags + Tags *map[string]*string `json:"tags,omitempty"` + *ContainerServiceProperties `json:"properties,omitempty"` +} + +// ContainerServiceAgentPoolProfile profile for the container service agent pool. +type ContainerServiceAgentPoolProfile struct { + // Name - Unique name of the agent pool profile in the context of the subscription and resource group. + Name *string `json:"name,omitempty"` + // Count - Number of agents (VMs) to host docker containers. Allowed values must be in the range of 1 to 100 (inclusive). The default value is 1. + Count *int32 `json:"count,omitempty"` + // VMSize - Size of agent VMs. Possible values include: 'StandardA0', 'StandardA1', 'StandardA2', 'StandardA3', 'StandardA4', 'StandardA5', 'StandardA6', 'StandardA7', 'StandardA8', 'StandardA9', 'StandardA10', 'StandardA11', 'StandardD1', 'StandardD2', 'StandardD3', 'StandardD4', 'StandardD11', 'StandardD12', 'StandardD13', 'StandardD14', 'StandardD1V2', 'StandardD2V2', 'StandardD3V2', 'StandardD4V2', 'StandardD5V2', 'StandardD11V2', 'StandardD12V2', 'StandardD13V2', 'StandardD14V2', 'StandardG1', 'StandardG2', 'StandardG3', 'StandardG4', 'StandardG5', 'StandardDS1', 'StandardDS2', 'StandardDS3', 'StandardDS4', 'StandardDS11', 'StandardDS12', 'StandardDS13', 'StandardDS14', 'StandardGS1', 'StandardGS2', 'StandardGS3', 'StandardGS4', 'StandardGS5' + VMSize ContainerServiceVMSizeTypes `json:"vmSize,omitempty"` + // DNSPrefix - DNS prefix to be used to create the FQDN for the agent pool. + DNSPrefix *string `json:"dnsPrefix,omitempty"` + // Fqdn - FQDN for the agent pool. + Fqdn *string `json:"fqdn,omitempty"` +} + +// ContainerServiceCustomProfile properties to configure a custom container service cluster. +type ContainerServiceCustomProfile struct { + // Orchestrator - The name of the custom orchestrator to use. + Orchestrator *string `json:"orchestrator,omitempty"` +} + +// ContainerServiceDiagnosticsProfile ... +type ContainerServiceDiagnosticsProfile struct { + // VMDiagnostics - Profile for the container service VM diagnostic agent. + VMDiagnostics *ContainerServiceVMDiagnostics `json:"vmDiagnostics,omitempty"` +} + +// ContainerServiceLinuxProfile profile for Linux VMs in the container service cluster.
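Every model field in these structs is a pointer so that unset values are omitted from the wire JSON, which is why the go-autorest `to` helpers show up alongside the generated code. A sketch, with made-up names and counts, of filling in the agent pool profile defined above (it assumes the `StandardD2V2` constant matches the `ContainerServiceVMSizeTypes` names listed in the VMSize doc comment):

```go
package main

import (
	"fmt"

	compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	// All names here are illustrative, not from the vendored code.
	pool := compute.ContainerServiceAgentPoolProfile{
		Name:      to.StringPtr("agentpool1"),
		Count:     to.Int32Ptr(3), // allowed range per the field doc is 1 to 100
		VMSize:    compute.StandardD2V2,
		DNSPrefix: to.StringPtr("example-agents"),
	}
	fmt.Println(*pool.Name, *pool.Count, pool.VMSize)
}
```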
+type ContainerServiceLinuxProfile struct { + // AdminUsername - The administrator username to use for Linux VMs. + AdminUsername *string `json:"adminUsername,omitempty"` + // SSH - The ssh key configuration for Linux VMs. + SSH *ContainerServiceSSHConfiguration `json:"ssh,omitempty"` +} + +// ContainerServiceListResult the response from the List Container Services operation. +type ContainerServiceListResult struct { + autorest.Response `json:"-"` + // Value - the list of container services. + Value *[]ContainerService `json:"value,omitempty"` + // NextLink - The URL to get the next set of container service results. + NextLink *string `json:"nextLink,omitempty"` +} + +// ContainerServiceListResultIterator provides access to a complete listing of ContainerService values. +type ContainerServiceListResultIterator struct { + i int + page ContainerServiceListResultPage +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *ContainerServiceListResultIterator) Next() error { + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err := iter.page.Next() + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter ContainerServiceListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter ContainerServiceListResultIterator) Response() ContainerServiceListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter ContainerServiceListResultIterator) Value() ContainerService { + if !iter.page.NotDone() { + return ContainerService{} + } + return iter.page.Values()[iter.i] +} + +// IsEmpty returns true if the ListResult contains no values. +func (cslr ContainerServiceListResult) IsEmpty() bool { + return cslr.Value == nil || len(*cslr.Value) == 0 +} + +// containerServiceListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (cslr ContainerServiceListResult) containerServiceListResultPreparer() (*http.Request, error) { + if cslr.NextLink == nil || len(to.String(cslr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(cslr.NextLink))) +} + +// ContainerServiceListResultPage contains a page of ContainerService values. +type ContainerServiceListResultPage struct { + fn func(ContainerServiceListResult) (ContainerServiceListResult, error) + cslr ContainerServiceListResult +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *ContainerServiceListResultPage) Next() error { + next, err := page.fn(page.cslr) + if err != nil { + return err + } + page.cslr = next + return nil +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page ContainerServiceListResultPage) NotDone() bool { + return !page.cslr.IsEmpty() +} + +// Response returns the raw server response from the last page request. 
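The Next/NotDone/Value trio above is the paging protocol that every generated ListResult iterator in this file follows. A hedged sketch of the consuming loop; in real use the iterator would come from a List call on the container services client defined elsewhere in this package:

```go
package main

import (
	"fmt"

	compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
	"github.com/Azure/go-autorest/autorest/to"
)

// printContainerServices drains an iterator with the NotDone/Value/Next
// protocol defined above. Sketch only; client construction is elided.
func printContainerServices(iter compute.ContainerServiceListResultIterator) error {
	for iter.NotDone() {
		cs := iter.Value()
		fmt.Println(to.String(cs.Name)) // to.String yields "" for a nil *string
		if err := iter.Next(); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	// A zero-value iterator is already exhausted, so this demo is a safe no-op.
	_ = printContainerServices(compute.ContainerServiceListResultIterator{})
}
```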
+func (page ContainerServiceListResultPage) Response() ContainerServiceListResult { + return page.cslr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page ContainerServiceListResultPage) Values() []ContainerService { + if page.cslr.IsEmpty() { + return nil + } + return *page.cslr.Value +} + +// ContainerServiceMasterProfile profile for the container service master. +type ContainerServiceMasterProfile struct { + // Count - Number of masters (VMs) in the container service cluster. Allowed values are 1, 3, and 5. The default value is 1. + Count *int32 `json:"count,omitempty"` + // DNSPrefix - DNS prefix to be used to create the FQDN for master. + DNSPrefix *string `json:"dnsPrefix,omitempty"` + // Fqdn - FQDN for the master. + Fqdn *string `json:"fqdn,omitempty"` +} + +// ContainerServiceOrchestratorProfile profile for the container service orchestrator. +type ContainerServiceOrchestratorProfile struct { + // OrchestratorType - The orchestrator to use to manage container service cluster resources. Valid values are Swarm, DCOS, and Custom. Possible values include: 'Swarm', 'DCOS', 'Custom', 'Kubernetes' + OrchestratorType ContainerServiceOrchestratorTypes `json:"orchestratorType,omitempty"` +} + +// ContainerServiceProperties properties of the container service. +type ContainerServiceProperties struct { + // ProvisioningState - the current deployment or provisioning state, which only appears in the response. + ProvisioningState *string `json:"provisioningState,omitempty"` + // OrchestratorProfile - Properties of the orchestrator. + OrchestratorProfile *ContainerServiceOrchestratorProfile `json:"orchestratorProfile,omitempty"` + // CustomProfile - Properties for custom clusters. + CustomProfile *ContainerServiceCustomProfile `json:"customProfile,omitempty"` + // ServicePrincipalProfile - Properties for cluster service principals. + ServicePrincipalProfile *ContainerServiceServicePrincipalProfile `json:"servicePrincipalProfile,omitempty"` + // MasterProfile - Properties of master agents. + MasterProfile *ContainerServiceMasterProfile `json:"masterProfile,omitempty"` + // AgentPoolProfiles - Properties of the agent pool. + AgentPoolProfiles *[]ContainerServiceAgentPoolProfile `json:"agentPoolProfiles,omitempty"` + // WindowsProfile - Properties of Windows VMs. + WindowsProfile *ContainerServiceWindowsProfile `json:"windowsProfile,omitempty"` + // LinuxProfile - Properties of Linux VMs. + LinuxProfile *ContainerServiceLinuxProfile `json:"linuxProfile,omitempty"` + // DiagnosticsProfile - Properties of the diagnostic agent. + DiagnosticsProfile *ContainerServiceDiagnosticsProfile `json:"diagnosticsProfile,omitempty"` +} + +// ContainerServicesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type ContainerServicesCreateOrUpdateFuture struct { + azure.Future + req *http.Request +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error.
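The CreateOrUpdate and Delete futures that follow share one contract: poll Done until it reports true, then call Result. A sketch of a blocking wait built on exactly the two methods shown here; the fixed sleep is a placeholder, and production code would respect the polling delay the service reports:

```go
package example

import (
	"time"

	compute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
)

// waitForContainerService blocks until the long-running create-or-update
// operation finishes, then fetches its result. Sketch only: real code would
// bound the loop and honor the service's retry interval.
func waitForContainerService(client compute.ContainerServicesClient, future compute.ContainerServicesCreateOrUpdateFuture) (compute.ContainerService, error) {
	for {
		done, err := future.Done(client) // the same call Result makes internally
		if err != nil {
			return compute.ContainerService{}, err
		}
		if done {
			return future.Result(client)
		}
		time.Sleep(10 * time.Second) // placeholder polling interval
	}
}
```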
+func (future ContainerServicesCreateOrUpdateFuture) Result(client ContainerServicesClient) (cs ContainerService, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + return + } + if !done { + return cs, autorest.NewError("compute.ContainerServicesCreateOrUpdateFuture", "Result", "asynchronous operation has not completed") + } + if future.PollingMethod() == azure.PollingLocation { + cs, err = client.CreateOrUpdateResponder(future.Response()) + return + } + var resp *http.Response + resp, err = autorest.SendWithSender(client, autorest.ChangeToGet(future.req), + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if err != nil { + return + } + cs, err = client.CreateOrUpdateResponder(resp) + return +} + +// ContainerServicesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running operation. +type ContainerServicesDeleteFuture struct { + azure.Future + req *http.Request +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future ContainerServicesDeleteFuture) Result(client ContainerServicesClient) (ar autorest.Response, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + return + } + if !done { + return ar, autorest.NewError("compute.ContainerServicesDeleteFuture", "Result", "asynchronous operation has not completed") + } + if future.PollingMethod() == azure.PollingLocation { + ar, err = client.DeleteResponder(future.Response()) + return + } + var resp *http.Response + resp, err = autorest.SendWithSender(client, autorest.ChangeToGet(future.req), + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if err != nil { + return + } + ar, err = client.DeleteResponder(resp) + return +} + +// ContainerServiceServicePrincipalProfile information about a service principal identity for the cluster to use for +// manipulating Azure APIs. +type ContainerServiceServicePrincipalProfile struct { + // ClientID - The ID for the service principal. + ClientID *string `json:"clientId,omitempty"` + // Secret - The secret password associated with the service principal. + Secret *string `json:"secret,omitempty"` +} + +// ContainerServiceSSHConfiguration SSH configuration for Linux-based VMs running on Azure. +type ContainerServiceSSHConfiguration struct { + // PublicKeys - the list of SSH public keys used to authenticate with Linux-based VMs. + PublicKeys *[]ContainerServiceSSHPublicKey `json:"publicKeys,omitempty"` +} + +// ContainerServiceSSHPublicKey contains information about SSH certificate public key data. +type ContainerServiceSSHPublicKey struct { + // KeyData - Certificate public key used to authenticate with VMs through SSH. The certificate must be in PEM format with or without headers. + KeyData *string `json:"keyData,omitempty"` +} + +// ContainerServiceVMDiagnostics profile for diagnostics on the container service VMs. +type ContainerServiceVMDiagnostics struct { + // Enabled - Whether the VM diagnostic agent is provisioned on the VM. + Enabled *bool `json:"enabled,omitempty"` + // StorageURI - The URI of the storage account where diagnostics are stored. + StorageURI *string `json:"storageUri,omitempty"` +} + +// ContainerServiceWindowsProfile profile for Windows VMs in the container service cluster. +type ContainerServiceWindowsProfile struct { + // AdminUsername - The administrator username to use for Windows VMs. 
+ AdminUsername *string `json:"adminUsername,omitempty"` + // AdminPassword - The administrator password to use for Windows VMs. + AdminPassword *string `json:"adminPassword,omitempty"` +} + +// CreationData data used when creating a disk. +type CreationData struct { + // CreateOption - This enumerates the possible sources of a disk's creation. Possible values include: 'Empty', 'Attach', 'FromImage', 'Import', 'Copy' + CreateOption DiskCreateOption `json:"createOption,omitempty"` + // StorageAccountID - If createOption is Import, the Azure Resource Manager identifier of the storage account containing the blob to import as a disk. Required only if the blob is in a different subscription + StorageAccountID *string `json:"storageAccountId,omitempty"` + // ImageReference - Disk source information. + ImageReference *ImageDiskReference `json:"imageReference,omitempty"` + // SourceURI - If createOption is Import, this is the URI of a blob to be imported into a managed disk. + SourceURI *string `json:"sourceUri,omitempty"` + // SourceResourceID - If createOption is Copy, this is the ARM id of the source snapshot or disk. + SourceResourceID *string `json:"sourceResourceId,omitempty"` +} + +// DataDisk describes a data disk. +type DataDisk struct { + // Lun - Specifies the logical unit number of the data disk. This value is used to identify data disks within the VM and therefore must be unique for each data disk attached to a VM. + Lun *int32 `json:"lun,omitempty"` + // Name - The disk name. + Name *string `json:"name,omitempty"` + // Vhd - The virtual hard disk. + Vhd *VirtualHardDisk `json:"vhd,omitempty"` + // Image - The source user image virtual hard disk. The virtual hard disk will be copied before being attached to the virtual machine. If SourceImage is provided, the destination virtual hard drive must not exist. + Image *VirtualHardDisk `json:"image,omitempty"` + // Caching - Specifies the caching requirements.
Possible values are: **None**, **ReadOnly**, **ReadWrite**.
Default: **None for Standard storage. ReadOnly for Premium storage**. Possible values include: 'CachingTypesNone', 'CachingTypesReadOnly', 'CachingTypesReadWrite' + Caching CachingTypes `json:"caching,omitempty"` + // WriteAcceleratorEnabled - Specifies whether writeAccelerator should be enabled or disabled on the disk. + WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty"` + // CreateOption - Specifies how the virtual machine should be created.
Possible values are: **Attach** – This value is used when you are using a specialized disk to create the virtual machine. **FromImage** – This value is used when you are using an image to create the virtual machine. If you are using a platform image, you also use the imageReference element described above. If you are using a marketplace image, you also use the plan element previously described. Possible values include: 'DiskCreateOptionTypesFromImage', 'DiskCreateOptionTypesEmpty', 'DiskCreateOptionTypesAttach' + CreateOption DiskCreateOptionTypes `json:"createOption,omitempty"` + // DiskSizeGB - Specifies the size of an empty data disk in gigabytes. This element can be used to overwrite the size of the disk in a virtual machine image.
This value cannot be larger than 1023 GB + DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` + // ManagedDisk - The managed disk parameters. + ManagedDisk *ManagedDiskParameters `json:"managedDisk,omitempty"` +} + +// DataDiskImage contains the data disk images information. +type DataDiskImage struct { + // Lun - Specifies the logical unit number of the data disk. This value is used to identify data disks within the VM and therefore must be unique for each data disk attached to a VM. + Lun *int32 `json:"lun,omitempty"` +} + +// DiagnosticsProfile specifies the boot diagnostic settings state.
Minimum api-version: 2015-06-15. +type DiagnosticsProfile struct { + // BootDiagnostics - Boot Diagnostics is a debugging feature which allows you to view Console Output and Screenshot to diagnose VM status. For Linux Virtual Machines, you can easily view the output of your console log. For both Windows and Linux virtual machines, Azure also enables you to see a screenshot of the VM from the hypervisor. + BootDiagnostics *BootDiagnostics `json:"bootDiagnostics,omitempty"` +} + +// Disk disk resource. +type Disk struct { + autorest.Response `json:"-"` + // ID - Resource Id + ID *string `json:"id,omitempty"` + // Name - Resource name + Name *string `json:"name,omitempty"` + // Type - Resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` + // Tags - Resource tags + Tags *map[string]*string `json:"tags,omitempty"` + // ManagedBy - A relative URI containing the ID of the VM that has the disk attached. + ManagedBy *string `json:"managedBy,omitempty"` + Sku *DiskSku `json:"sku,omitempty"` + // Zones - The Logical zone list for Disk. + Zones *[]string `json:"zones,omitempty"` + *DiskProperties `json:"properties,omitempty"` +} + +// DiskEncryptionSettings describes encryption settings for a disk +type DiskEncryptionSettings struct { + // DiskEncryptionKey - Specifies the location of the disk encryption key, which is a Key Vault Secret. + DiskEncryptionKey *KeyVaultSecretReference `json:"diskEncryptionKey,omitempty"` + // KeyEncryptionKey - Specifies the location of the key encryption key in Key Vault. + KeyEncryptionKey *KeyVaultKeyReference `json:"keyEncryptionKey,omitempty"` + // Enabled - Specifies whether disk encryption should be enabled on the virtual machine. + Enabled *bool `json:"enabled,omitempty"` +} + +// DiskInstanceView the instance view of the disk. +type DiskInstanceView struct { + // Name - The disk name. + Name *string `json:"name,omitempty"` + // EncryptionSettings - Specifies the encryption settings for the OS Disk.
Minimum api-version: 2015-06-15 + EncryptionSettings *[]DiskEncryptionSettings `json:"encryptionSettings,omitempty"` + // Statuses - The resource status information. + Statuses *[]InstanceViewStatus `json:"statuses,omitempty"` +} + +// DiskList the List Disks operation response. +type DiskList struct { + autorest.Response `json:"-"` + // Value - A list of disks. + Value *[]Disk `json:"value,omitempty"` + // NextLink - The uri to fetch the next page of disks. Call ListNext() with this to fetch the next page of disks. + NextLink *string `json:"nextLink,omitempty"` +} + +// DiskListIterator provides access to a complete listing of Disk values. +type DiskListIterator struct { + i int + page DiskListPage +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *DiskListIterator) Next() error { + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err := iter.page.Next() + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter DiskListIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter DiskListIterator) Response() DiskList { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter DiskListIterator) Value() Disk { + if !iter.page.NotDone() { + return Disk{} + } + return iter.page.Values()[iter.i] +} + +// IsEmpty returns true if the ListResult contains no values. +func (dl DiskList) IsEmpty() bool { + return dl.Value == nil || len(*dl.Value) == 0 +} + +// diskListPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (dl DiskList) diskListPreparer() (*http.Request, error) { + if dl.NextLink == nil || len(to.String(dl.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(dl.NextLink))) +} + +// DiskListPage contains a page of Disk values. +type DiskListPage struct { + fn func(DiskList) (DiskList, error) + dl DiskList +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *DiskListPage) Next() error { + next, err := page.fn(page.dl) + if err != nil { + return err + } + page.dl = next + return nil +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page DiskListPage) NotDone() bool { + return !page.dl.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page DiskListPage) Response() DiskList { + return page.dl +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page DiskListPage) Values() []Disk { + if page.dl.IsEmpty() { + return nil + } + return *page.dl.Value +} + +// DiskProperties disk resource properties. +type DiskProperties struct { + // TimeCreated - The time when the disk was created. + TimeCreated *date.Time `json:"timeCreated,omitempty"` + // OsType - The Operating System type. 
Possible values include: 'Windows', 'Linux' + OsType OperatingSystemTypes `json:"osType,omitempty"` + // CreationData - Disk source information. CreationData information cannot be changed after the disk has been created. + CreationData *CreationData `json:"creationData,omitempty"` + // DiskSizeGB - If creationData.createOption is Empty, this field is mandatory and it indicates the size of the VHD to create. If this field is present for updates or creation with other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a running VM, and can only increase the disk's size. + DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` + // EncryptionSettings - Encryption settings for disk or snapshot + EncryptionSettings *EncryptionSettings `json:"encryptionSettings,omitempty"` + // ProvisioningState - The disk provisioning state. + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// DisksCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running operation. +type DisksCreateOrUpdateFuture struct { + azure.Future + req *http.Request +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future DisksCreateOrUpdateFuture) Result(client DisksClient) (d Disk, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + return + } + if !done { + return d, autorest.NewError("compute.DisksCreateOrUpdateFuture", "Result", "asynchronous operation has not completed") + } + if future.PollingMethod() == azure.PollingLocation { + d, err = client.CreateOrUpdateResponder(future.Response()) + return + } + var resp *http.Response + resp, err = autorest.SendWithSender(client, autorest.ChangeToGet(future.req), + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if err != nil { + return + } + d, err = client.CreateOrUpdateResponder(resp) + return +} + +// DisksDeleteFuture an abstraction for monitoring and retrieving the results of a long-running operation. +type DisksDeleteFuture struct { + azure.Future + req *http.Request +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future DisksDeleteFuture) Result(client DisksClient) (osr OperationStatusResponse, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + return + } + if !done { + return osr, autorest.NewError("compute.DisksDeleteFuture", "Result", "asynchronous operation has not completed") + } + if future.PollingMethod() == azure.PollingLocation { + osr, err = client.DeleteResponder(future.Response()) + return + } + var resp *http.Response + resp, err = autorest.SendWithSender(client, autorest.ChangeToGet(future.req), + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if err != nil { + return + } + osr, err = client.DeleteResponder(resp) + return +} + +// DisksGrantAccessFuture an abstraction for monitoring and retrieving the results of a long-running operation. +type DisksGrantAccessFuture struct { + azure.Future + req *http.Request +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
+func (future DisksGrantAccessFuture) Result(client DisksClient) (au AccessURI, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + return + } + if !done { + return au, autorest.NewError("compute.DisksGrantAccessFuture", "Result", "asynchronous operation has not completed") + } + if future.PollingMethod() == azure.PollingLocation { + au, err = client.GrantAccessResponder(future.Response()) + return + } + var resp *http.Response + resp, err = autorest.SendWithSender(client, autorest.ChangeToGet(future.req), + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if err != nil { + return + } + au, err = client.GrantAccessResponder(resp) + return +} + +// DiskSku the disks and snapshots sku name. Can be Standard_LRS or Premium_LRS. +type DiskSku struct { + // Name - The sku name. Possible values include: 'StandardLRS', 'PremiumLRS' + Name StorageAccountTypes `json:"name,omitempty"` + // Tier - The sku tier. + Tier *string `json:"tier,omitempty"` +} + +// DisksRevokeAccessFuture an abstraction for monitoring and retrieving the results of a long-running operation. +type DisksRevokeAccessFuture struct { + azure.Future + req *http.Request +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future DisksRevokeAccessFuture) Result(client DisksClient) (osr OperationStatusResponse, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + return + } + if !done { + return osr, autorest.NewError("compute.DisksRevokeAccessFuture", "Result", "asynchronous operation has not completed") + } + if future.PollingMethod() == azure.PollingLocation { + osr, err = client.RevokeAccessResponder(future.Response()) + return + } + var resp *http.Response + resp, err = autorest.SendWithSender(client, autorest.ChangeToGet(future.req), + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if err != nil { + return + } + osr, err = client.RevokeAccessResponder(resp) + return +} + +// DisksUpdateFuture an abstraction for monitoring and retrieving the results of a long-running operation. +type DisksUpdateFuture struct { + azure.Future + req *http.Request +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future DisksUpdateFuture) Result(client DisksClient) (d Disk, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + return + } + if !done { + return d, autorest.NewError("compute.DisksUpdateFuture", "Result", "asynchronous operation has not completed") + } + if future.PollingMethod() == azure.PollingLocation { + d, err = client.UpdateResponder(future.Response()) + return + } + var resp *http.Response + resp, err = autorest.SendWithSender(client, autorest.ChangeToGet(future.req), + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if err != nil { + return + } + d, err = client.UpdateResponder(resp) + return +} + +// DiskUpdate disk update resource. +type DiskUpdate struct { + // Tags - Resource tags + Tags *map[string]*string `json:"tags,omitempty"` + Sku *DiskSku `json:"sku,omitempty"` + *DiskUpdateProperties `json:"properties,omitempty"` +} + +// DiskUpdateProperties disk resource update properties. +type DiskUpdateProperties struct { + // OsType - the Operating System type. 
Possible values include: 'Windows', 'Linux' + OsType OperatingSystemTypes `json:"osType,omitempty"` + // DiskSizeGB - If creationData.createOption is Empty, this field is mandatory and it indicates the size of the VHD to create. If this field is present for updates or creation with other options, it indicates a resize. Resizes are only allowed if the disk is not attached to a running VM, and can only increase the disk's size. + DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` + // EncryptionSettings - Encryption settings for disk or snapshot + EncryptionSettings *EncryptionSettings `json:"encryptionSettings,omitempty"` +} + +// EncryptionSettings encryption settings for disk or snapshot +type EncryptionSettings struct { + // Enabled - Set this flag to true and provide DiskEncryptionKey and optional KeyEncryptionKey to enable encryption. Set this flag to false and remove DiskEncryptionKey and KeyEncryptionKey to disable encryption. If EncryptionSettings is null in the request object, the existing settings remain unchanged. + Enabled *bool `json:"enabled,omitempty"` + // DiskEncryptionKey - Key Vault Secret Url and vault id of the disk encryption key + DiskEncryptionKey *KeyVaultAndSecretReference `json:"diskEncryptionKey,omitempty"` + // KeyEncryptionKey - Key Vault Key Url and vault id of the key encryption key + KeyEncryptionKey *KeyVaultAndKeyReference `json:"keyEncryptionKey,omitempty"` +} + +// GrantAccessData data used for requesting a SAS. +type GrantAccessData struct { + // Access - Possible values include: 'None', 'Read' + Access AccessLevel `json:"access,omitempty"` + // DurationInSeconds - Time duration in seconds until the SAS access expires. + DurationInSeconds *int32 `json:"durationInSeconds,omitempty"` +} + +// HardwareProfile specifies the hardware settings for the virtual machine. +type HardwareProfile struct { + // VMSize - Specifies the size of the virtual machine. For more information about virtual machine sizes, see [Sizes for virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-sizes?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json).
The available VM sizes depend on region and availability set. For a list of available sizes use these APIs: [List all available virtual machine sizes in an availability set](virtualmachines-list-sizes-availability-set.md), [List all available virtual machine sizes in a region](virtualmachines-list-sizes-region.md),
[List all available virtual machine sizes for resizing](virtualmachines-list-sizes-for-resizing.md). Possible values include: 'VirtualMachineSizeTypesBasicA0', 'VirtualMachineSizeTypesBasicA1', 'VirtualMachineSizeTypesBasicA2', 'VirtualMachineSizeTypesBasicA3', 'VirtualMachineSizeTypesBasicA4', 'VirtualMachineSizeTypesStandardA0', 'VirtualMachineSizeTypesStandardA1', 'VirtualMachineSizeTypesStandardA2', 'VirtualMachineSizeTypesStandardA3', 'VirtualMachineSizeTypesStandardA4', 'VirtualMachineSizeTypesStandardA5', 'VirtualMachineSizeTypesStandardA6', 'VirtualMachineSizeTypesStandardA7', 'VirtualMachineSizeTypesStandardA8', 'VirtualMachineSizeTypesStandardA9', 'VirtualMachineSizeTypesStandardA10', 'VirtualMachineSizeTypesStandardA11', 'VirtualMachineSizeTypesStandardA1V2', 'VirtualMachineSizeTypesStandardA2V2', 'VirtualMachineSizeTypesStandardA4V2', 'VirtualMachineSizeTypesStandardA8V2', 'VirtualMachineSizeTypesStandardA2mV2', 'VirtualMachineSizeTypesStandardA4mV2', 'VirtualMachineSizeTypesStandardA8mV2', 'VirtualMachineSizeTypesStandardB1s', 'VirtualMachineSizeTypesStandardB1ms', 'VirtualMachineSizeTypesStandardB2s', 'VirtualMachineSizeTypesStandardB2ms', 'VirtualMachineSizeTypesStandardB4ms', 'VirtualMachineSizeTypesStandardB8ms', 'VirtualMachineSizeTypesStandardD1', 'VirtualMachineSizeTypesStandardD2', 'VirtualMachineSizeTypesStandardD3', 'VirtualMachineSizeTypesStandardD4', 'VirtualMachineSizeTypesStandardD11', 'VirtualMachineSizeTypesStandardD12', 'VirtualMachineSizeTypesStandardD13', 'VirtualMachineSizeTypesStandardD14', 'VirtualMachineSizeTypesStandardD1V2', 'VirtualMachineSizeTypesStandardD2V2', 'VirtualMachineSizeTypesStandardD3V2', 'VirtualMachineSizeTypesStandardD4V2', 'VirtualMachineSizeTypesStandardD5V2', 'VirtualMachineSizeTypesStandardD2V3', 'VirtualMachineSizeTypesStandardD4V3', 'VirtualMachineSizeTypesStandardD8V3', 'VirtualMachineSizeTypesStandardD16V3', 'VirtualMachineSizeTypesStandardD32V3', 'VirtualMachineSizeTypesStandardD64V3', 'VirtualMachineSizeTypesStandardD2sV3', 'VirtualMachineSizeTypesStandardD4sV3', 'VirtualMachineSizeTypesStandardD8sV3', 'VirtualMachineSizeTypesStandardD16sV3', 'VirtualMachineSizeTypesStandardD32sV3', 'VirtualMachineSizeTypesStandardD64sV3', 'VirtualMachineSizeTypesStandardD11V2', 'VirtualMachineSizeTypesStandardD12V2', 'VirtualMachineSizeTypesStandardD13V2', 'VirtualMachineSizeTypesStandardD14V2', 'VirtualMachineSizeTypesStandardD15V2', 'VirtualMachineSizeTypesStandardDS1', 'VirtualMachineSizeTypesStandardDS2', 'VirtualMachineSizeTypesStandardDS3', 'VirtualMachineSizeTypesStandardDS4', 'VirtualMachineSizeTypesStandardDS11', 'VirtualMachineSizeTypesStandardDS12', 'VirtualMachineSizeTypesStandardDS13', 'VirtualMachineSizeTypesStandardDS14', 'VirtualMachineSizeTypesStandardDS1V2', 'VirtualMachineSizeTypesStandardDS2V2', 'VirtualMachineSizeTypesStandardDS3V2', 'VirtualMachineSizeTypesStandardDS4V2', 'VirtualMachineSizeTypesStandardDS5V2', 'VirtualMachineSizeTypesStandardDS11V2', 'VirtualMachineSizeTypesStandardDS12V2', 'VirtualMachineSizeTypesStandardDS13V2', 'VirtualMachineSizeTypesStandardDS14V2', 'VirtualMachineSizeTypesStandardDS15V2', 'VirtualMachineSizeTypesStandardDS134V2', 'VirtualMachineSizeTypesStandardDS132V2', 'VirtualMachineSizeTypesStandardDS148V2', 'VirtualMachineSizeTypesStandardDS144V2', 'VirtualMachineSizeTypesStandardE2V3', 'VirtualMachineSizeTypesStandardE4V3', 'VirtualMachineSizeTypesStandardE8V3', 'VirtualMachineSizeTypesStandardE16V3', 'VirtualMachineSizeTypesStandardE32V3', 'VirtualMachineSizeTypesStandardE64V3', 
'VirtualMachineSizeTypesStandardE2sV3', 'VirtualMachineSizeTypesStandardE4sV3', 'VirtualMachineSizeTypesStandardE8sV3', 'VirtualMachineSizeTypesStandardE16sV3', 'VirtualMachineSizeTypesStandardE32sV3', 'VirtualMachineSizeTypesStandardE64sV3', 'VirtualMachineSizeTypesStandardE3216V3', 'VirtualMachineSizeTypesStandardE328sV3', 'VirtualMachineSizeTypesStandardE6432sV3', 'VirtualMachineSizeTypesStandardE6416sV3', 'VirtualMachineSizeTypesStandardF1', 'VirtualMachineSizeTypesStandardF2', 'VirtualMachineSizeTypesStandardF4', 'VirtualMachineSizeTypesStandardF8', 'VirtualMachineSizeTypesStandardF16', 'VirtualMachineSizeTypesStandardF1s', 'VirtualMachineSizeTypesStandardF2s', 'VirtualMachineSizeTypesStandardF4s', 'VirtualMachineSizeTypesStandardF8s', 'VirtualMachineSizeTypesStandardF16s', 'VirtualMachineSizeTypesStandardF2sV2', 'VirtualMachineSizeTypesStandardF4sV2', 'VirtualMachineSizeTypesStandardF8sV2', 'VirtualMachineSizeTypesStandardF16sV2', 'VirtualMachineSizeTypesStandardF32sV2', 'VirtualMachineSizeTypesStandardF64sV2', 'VirtualMachineSizeTypesStandardF72sV2', 'VirtualMachineSizeTypesStandardG1', 'VirtualMachineSizeTypesStandardG2', 'VirtualMachineSizeTypesStandardG3', 'VirtualMachineSizeTypesStandardG4', 'VirtualMachineSizeTypesStandardG5', 'VirtualMachineSizeTypesStandardGS1', 'VirtualMachineSizeTypesStandardGS2', 'VirtualMachineSizeTypesStandardGS3', 'VirtualMachineSizeTypesStandardGS4', 'VirtualMachineSizeTypesStandardGS5', 'VirtualMachineSizeTypesStandardGS48', 'VirtualMachineSizeTypesStandardGS44', 'VirtualMachineSizeTypesStandardGS516', 'VirtualMachineSizeTypesStandardGS58', 'VirtualMachineSizeTypesStandardH8', 'VirtualMachineSizeTypesStandardH16', 'VirtualMachineSizeTypesStandardH8m', 'VirtualMachineSizeTypesStandardH16m', 'VirtualMachineSizeTypesStandardH16r', 'VirtualMachineSizeTypesStandardH16mr', 'VirtualMachineSizeTypesStandardL4s', 'VirtualMachineSizeTypesStandardL8s', 'VirtualMachineSizeTypesStandardL16s', 'VirtualMachineSizeTypesStandardL32s', 'VirtualMachineSizeTypesStandardM64s', 'VirtualMachineSizeTypesStandardM64ms', 'VirtualMachineSizeTypesStandardM128s', 'VirtualMachineSizeTypesStandardM128ms', 'VirtualMachineSizeTypesStandardM6432ms', 'VirtualMachineSizeTypesStandardM6416ms', 'VirtualMachineSizeTypesStandardM12864ms', 'VirtualMachineSizeTypesStandardM12832ms', 'VirtualMachineSizeTypesStandardNC6', 'VirtualMachineSizeTypesStandardNC12', 'VirtualMachineSizeTypesStandardNC24', 'VirtualMachineSizeTypesStandardNC24r', 'VirtualMachineSizeTypesStandardNC6sV2', 'VirtualMachineSizeTypesStandardNC12sV2', 'VirtualMachineSizeTypesStandardNC24sV2', 'VirtualMachineSizeTypesStandardNC24rsV2', 'VirtualMachineSizeTypesStandardNC6sV3', 'VirtualMachineSizeTypesStandardNC12sV3', 'VirtualMachineSizeTypesStandardNC24sV3', 'VirtualMachineSizeTypesStandardNC24rsV3', 'VirtualMachineSizeTypesStandardND6s', 'VirtualMachineSizeTypesStandardND12s', 'VirtualMachineSizeTypesStandardND24s', 'VirtualMachineSizeTypesStandardND24rs', 'VirtualMachineSizeTypesStandardNV6', 'VirtualMachineSizeTypesStandardNV12', 'VirtualMachineSizeTypesStandardNV24' + VMSize VirtualMachineSizeTypes `json:"vmSize,omitempty"` +} + +// Image the source user image virtual hard disk. The virtual hard disk will be copied before being attached to the +// virtual machine. If SourceImage is provided, the destination virtual hard drive must not exist. 
+type Image struct {
+	autorest.Response `json:"-"`
+	// ID - Resource Id
+	ID *string `json:"id,omitempty"`
+	// Name - Resource name
+	Name *string `json:"name,omitempty"`
+	// Type - Resource type
+	Type *string `json:"type,omitempty"`
+	// Location - Resource location
+	Location *string `json:"location,omitempty"`
+	// Tags - Resource tags
+	Tags *map[string]*string `json:"tags,omitempty"`
+	*ImageProperties `json:"properties,omitempty"`
+}
+
+// ImageDataDisk describes a data disk.
+type ImageDataDisk struct {
+	// Lun - Specifies the logical unit number of the data disk. This value is used to identify data disks within the VM and therefore must be unique for each data disk attached to a VM.
+	Lun *int32 `json:"lun,omitempty"`
+	// Snapshot - The snapshot.
+	Snapshot *SubResource `json:"snapshot,omitempty"`
+	// ManagedDisk - The managedDisk.
+	ManagedDisk *SubResource `json:"managedDisk,omitempty"`
+	// BlobURI - The Virtual Hard Disk.
+	BlobURI *string `json:"blobUri,omitempty"`
+	// Caching - Specifies the caching requirements.

Possible values are:

**None**

**ReadOnly**

**ReadWrite**

Default: **None for Standard storage. ReadOnly for Premium storage**. Possible values include: 'CachingTypesNone', 'CachingTypesReadOnly', 'CachingTypesReadWrite'
+	Caching CachingTypes `json:"caching,omitempty"`
+	// DiskSizeGB - Specifies the size of empty data disks in gigabytes. This element can be used to overwrite the name of the disk in a virtual machine image.

This value cannot be larger than 1023 GB
+	DiskSizeGB *int32 `json:"diskSizeGB,omitempty"`
+	// StorageAccountType - Specifies the storage account type for the managed disk. Possible values are: Standard_LRS or Premium_LRS. Possible values include: 'StandardLRS', 'PremiumLRS'
+	StorageAccountType StorageAccountTypes `json:"storageAccountType,omitempty"`
+}
+
+// ImageDiskReference the source image used for creating the disk.
+type ImageDiskReference struct {
+	// ID - A relative uri containing either a Platform Image Repository or user image reference.
+	ID *string `json:"id,omitempty"`
+	// Lun - If the disk is created from an image's data disk, this is an index that indicates which of the data disks in the image to use. For OS disks, this field is null.
+	Lun *int32 `json:"lun,omitempty"`
+}
+
+// ImageListResult the List Image operation response.
+type ImageListResult struct {
+	autorest.Response `json:"-"`
+	// Value - The list of Images.
+	Value *[]Image `json:"value,omitempty"`
+	// NextLink - The uri to fetch the next page of Images. Call ListNext() with this to fetch the next page of Images.
+	NextLink *string `json:"nextLink,omitempty"`
+}
+
+// ImageListResultIterator provides access to a complete listing of Image values.
+type ImageListResultIterator struct {
+	i    int
+	page ImageListResultPage
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *ImageListResultIterator) Next() error {
+	iter.i++
+	if iter.i < len(iter.page.Values()) {
+		return nil
+	}
+	err := iter.page.Next()
+	if err != nil {
+		iter.i--
+		return err
+	}
+	iter.i = 0
+	return nil
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter ImageListResultIterator) NotDone() bool {
+	return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter ImageListResultIterator) Response() ImageListResult {
+	return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter ImageListResultIterator) Value() Image {
+	if !iter.page.NotDone() {
+		return Image{}
+	}
+	return iter.page.Values()[iter.i]
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (ilr ImageListResult) IsEmpty() bool {
+	return ilr.Value == nil || len(*ilr.Value) == 0
+}
+
+// imageListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (ilr ImageListResult) imageListResultPreparer() (*http.Request, error) {
+	if ilr.NextLink == nil || len(to.String(ilr.NextLink)) < 1 {
+		return nil, nil
+	}
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(to.String(ilr.NextLink)))
+}
+
+// ImageListResultPage contains a page of Image values.
+type ImageListResultPage struct {
+	fn  func(ImageListResult) (ImageListResult, error)
+	ilr ImageListResult
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *ImageListResultPage) Next() error {
+	next, err := page.fn(page.ilr)
+	if err != nil {
+		return err
+	}
+	page.ilr = next
+	return nil
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page ImageListResultPage) NotDone() bool {
+	return !page.ilr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page ImageListResultPage) Response() ImageListResult {
+	return page.ilr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page ImageListResultPage) Values() []Image {
+	if page.ilr.IsEmpty() {
+		return nil
+	}
+	return *page.ilr.Value
+}
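The iterator/page pair above is the generated paging pattern used throughout this file: a page wraps one ImageListResult plus a fetch function for its NextLink, and the iterator walks values across pages. A minimal sketch of draining one (the client setup needed to obtain a populated iterator is omitted; `allImages` is a hypothetical helper, not part of the vendored package):

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
)

// allImages drains an ImageListResultIterator: Value/NotDone step through the
// current page, and Next transparently follows NextLink to later pages.
func allImages(iter compute.ImageListResultIterator) ([]compute.Image, error) {
	var out []compute.Image
	for iter.NotDone() {
		out = append(out, iter.Value())
		if err := iter.Next(); err != nil {
			return out, err
		}
	}
	return out, nil
}

func main() {
	// A zero-value iterator is immediately done, so this prints 0; a real
	// iterator would come from a List call on an authenticated ImagesClient.
	var iter compute.ImageListResultIterator
	images, _ := allImages(iter)
	fmt.Printf("found %d images\n", len(images))
}
```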

+
+// ImageOSDisk describes an Operating System disk.
+type ImageOSDisk struct {
+	// OsType - This property allows you to specify the type of the OS that is included in the disk if creating a VM from a custom image.

Possible values are:

**Windows**

**Linux**. Possible values include: 'Windows', 'Linux' + OsType OperatingSystemTypes `json:"osType,omitempty"` + // OsState - The OS State. Possible values include: 'Generalized', 'Specialized' + OsState OperatingSystemStateTypes `json:"osState,omitempty"` + // Snapshot - The snapshot. + Snapshot *SubResource `json:"snapshot,omitempty"` + // ManagedDisk - The managedDisk. + ManagedDisk *SubResource `json:"managedDisk,omitempty"` + // BlobURI - The Virtual Hard Disk. + BlobURI *string `json:"blobUri,omitempty"` + // Caching - Specifies the caching requirements.

Possible values are:

**None**

**ReadOnly**

**ReadWrite**

Default: **None for Standard storage. ReadOnly for Premium storage**. Possible values include: 'CachingTypesNone', 'CachingTypesReadOnly', 'CachingTypesReadWrite'
+	Caching CachingTypes `json:"caching,omitempty"`
+	// DiskSizeGB - Specifies the size of empty data disks in gigabytes. This element can be used to overwrite the name of the disk in a virtual machine image.

This value cannot be larger than 1023 GB
+	DiskSizeGB *int32 `json:"diskSizeGB,omitempty"`
+	// StorageAccountType - Specifies the storage account type for the managed disk. Possible values are: Standard_LRS or Premium_LRS. Possible values include: 'StandardLRS', 'PremiumLRS'
+	StorageAccountType StorageAccountTypes `json:"storageAccountType,omitempty"`
+}
+
+// ImageProperties describes the properties of an Image.
+type ImageProperties struct {
+	// SourceVirtualMachine - The source virtual machine from which Image is created.
+	SourceVirtualMachine *SubResource `json:"sourceVirtualMachine,omitempty"`
+	// StorageProfile - Specifies the storage settings for the virtual machine disks.
+	StorageProfile *ImageStorageProfile `json:"storageProfile,omitempty"`
+	// ProvisioningState - The provisioning state.
+	ProvisioningState *string `json:"provisioningState,omitempty"`
+}
+
+// ImageReference specifies information about the image to use. You can specify information about platform images,
+// marketplace images, or virtual machine images. This element is required when you want to use a platform image,
+// marketplace image, or virtual machine image, but is not used in other creation operations.
+type ImageReference struct {
+	// ID - Resource Id
+	ID *string `json:"id,omitempty"`
+	// Publisher - The image publisher.
+	Publisher *string `json:"publisher,omitempty"`
+	// Offer - Specifies the offer of the platform image or marketplace image used to create the virtual machine.
+	Offer *string `json:"offer,omitempty"`
+	// Sku - The image SKU.
+	Sku *string `json:"sku,omitempty"`
+	// Version - Specifies the version of the platform image or marketplace image used to create the virtual machine. The allowed formats are Major.Minor.Build or 'latest'. Major, Minor, and Build are decimal numbers. Specify 'latest' to use the latest version of an image available at deploy time. Even if you use 'latest', the VM image will not automatically update after deploy time even if a new version becomes available.
+	Version *string `json:"version,omitempty"`
+}
+
+// ImagesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running operation.
+type ImagesCreateOrUpdateFuture struct {
+	azure.Future
+	req *http.Request
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future ImagesCreateOrUpdateFuture) Result(client ImagesClient) (i Image, err error) {
+	var done bool
+	done, err = future.Done(client)
+	if err != nil {
+		return
+	}
+	if !done {
+		return i, autorest.NewError("compute.ImagesCreateOrUpdateFuture", "Result", "asynchronous operation has not completed")
+	}
+	if future.PollingMethod() == azure.PollingLocation {
+		i, err = client.CreateOrUpdateResponder(future.Response())
+		return
+	}
+	var resp *http.Response
+	resp, err = autorest.SendWithSender(client, autorest.ChangeToGet(future.req),
+		autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+	if err != nil {
+		return
+	}
+	i, err = client.CreateOrUpdateResponder(resp)
+	return
+}
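ImageProperties and ImageReference are plain data-carrying structs, so request payloads are just composite literals. A sketch of a capture payload and a marketplace reference (every concrete value below is illustrative only: the location, the resource ID, and the publisher/offer/SKU triple):

```go
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
	"github.com/Azure/go-autorest/autorest/to"
)

func main() {
	// Capture payload: turn an existing (generalized) VM into an Image.
	img := compute.Image{
		Location: to.StringPtr("westus2"),
		ImageProperties: &compute.ImageProperties{
			SourceVirtualMachine: &compute.SubResource{
				// Placeholder resource ID, not a real subscription.
				ID: to.StringPtr("/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/example-rg/providers/Microsoft.Compute/virtualMachines/example-vm"),
			},
		},
	}

	// Platform-image reference; 'latest' is resolved once at deploy time and
	// the VM is never auto-updated afterwards, per the Version comment above.
	ref := compute.ImageReference{
		Publisher: to.StringPtr("Canonical"),
		Offer:     to.StringPtr("UbuntuServer"),
		Sku:       to.StringPtr("16.04-LTS"),
		Version:   to.StringPtr("latest"),
	}

	fmt.Println(*img.Location, *ref.Offer)
}
```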
+
+// ImagesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running operation.
+type ImagesDeleteFuture struct {
+	azure.Future
+	req *http.Request
+}
+
+// Result returns the result of the asynchronous operation.
+// If the operation has not completed it will return an error.
+func (future ImagesDeleteFuture) Result(client ImagesClient) (osr OperationStatusResponse, err error) {
+	var done bool
+	done, err = future.Done(client)
+	if err != nil {
+		return
+	}
+	if !done {
+		return osr, autorest.NewError("compute.ImagesDeleteFuture", "Result", "asynchronous operation has not completed")
+	}
+	if future.PollingMethod() == azure.PollingLocation {
+		osr, err = client.DeleteResponder(future.Response())
+		return
+	}
+	var resp *http.Response
+	resp, err = autorest.SendWithSender(client, autorest.ChangeToGet(future.req),
+		autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
+	if err != nil {
+		return
+	}
+	osr, err = client.DeleteResponder(resp)
+	return
+}
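Both futures share one contract: Done must have reported true before Result is usable, otherwise Result returns the "asynchronous operation has not completed" error seen above. A hedged polling sketch (the fixed 10-second interval is an assumption of this sketch; real callers would honor the service's Retry-After hints):

```go
package computeexamples

import (
	"time"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
)

// waitForImageDelete polls an ImagesDeleteFuture until Done reports true,
// then retrieves the terminal OperationStatusResponse via Result. The
// ImagesClient satisfies the autorest.Sender that Done expects.
func waitForImageDelete(client compute.ImagesClient, future compute.ImagesDeleteFuture) (compute.OperationStatusResponse, error) {
	for {
		done, err := future.Done(client)
		if err != nil {
			return compute.OperationStatusResponse{}, err
		}
		if done {
			break
		}
		time.Sleep(10 * time.Second) // arbitrary interval for this sketch
	}
	return future.Result(client)
}
```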

+// ImageStorageProfile describes a storage profile.
+type ImageStorageProfile struct {
+	// OsDisk - Specifies information about the operating system disk used by the virtual machine. For more information about disks, see [About disks and VHDs for Azure virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-about-disks-vhds?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json).
+	OsDisk *ImageOSDisk `json:"osDisk,omitempty"`
+	// DataDisks - Specifies the parameters that are used to add a data disk to a virtual machine.

For more information about disks, see [About disks and VHDs for Azure virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-about-disks-vhds?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json). + DataDisks *[]ImageDataDisk `json:"dataDisks,omitempty"` +} + +// InnerError inner error details. +type InnerError struct { + // Exceptiontype - The exception type. + Exceptiontype *string `json:"exceptiontype,omitempty"` + // Errordetail - The internal error message or exception dump. + Errordetail *string `json:"errordetail,omitempty"` +} + +// InstanceViewStatus instance view status. +type InstanceViewStatus struct { + // Code - The status code. + Code *string `json:"code,omitempty"` + // Level - The level code. Possible values include: 'Info', 'Warning', 'Error' + Level StatusLevelTypes `json:"level,omitempty"` + // DisplayStatus - The short localizable label for the status. + DisplayStatus *string `json:"displayStatus,omitempty"` + // Message - The detailed status message, including for alerts and error messages. + Message *string `json:"message,omitempty"` + // Time - The time of the status. + Time *date.Time `json:"time,omitempty"` +} + +// KeyVaultAndKeyReference key Vault Key Url and vault id of KeK, KeK is optional and when provided is used to unwrap +// the encryptionKey +type KeyVaultAndKeyReference struct { + // SourceVault - Resource id of the KeyVault containing the key or secret + SourceVault *SourceVault `json:"sourceVault,omitempty"` + // KeyURL - Url pointing to a key or secret in KeyVault + KeyURL *string `json:"keyUrl,omitempty"` +} + +// KeyVaultAndSecretReference key Vault Secret Url and vault id of the encryption key +type KeyVaultAndSecretReference struct { + // SourceVault - Resource id of the KeyVault containing the key or secret + SourceVault *SourceVault `json:"sourceVault,omitempty"` + // SecretURL - Url pointing to a key or secret in KeyVault + SecretURL *string `json:"secretUrl,omitempty"` +} + +// KeyVaultKeyReference describes a reference to Key Vault Key +type KeyVaultKeyReference struct { + // KeyURL - The URL referencing a key encryption key in Key Vault. + KeyURL *string `json:"keyUrl,omitempty"` + // SourceVault - The relative URL of the Key Vault containing the key. + SourceVault *SubResource `json:"sourceVault,omitempty"` +} + +// KeyVaultSecretReference describes a reference to Key Vault Secret +type KeyVaultSecretReference struct { + // SecretURL - The URL referencing a secret in a Key Vault. + SecretURL *string `json:"secretUrl,omitempty"` + // SourceVault - The relative URL of the Key Vault containing the secret. + SourceVault *SubResource `json:"sourceVault,omitempty"` +} + +// LinuxConfiguration specifies the Linux operating system settings on the virtual machine.

For a list of supported Linux distributions, see [Linux on Azure-Endorsed Distributions](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-endorsed-distros?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json)

For running non-endorsed distributions, see [Information for Non-Endorsed +// Distributions](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-create-upload-generic?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json). +type LinuxConfiguration struct { + // DisablePasswordAuthentication - Specifies whether password authentication should be disabled. + DisablePasswordAuthentication *bool `json:"disablePasswordAuthentication,omitempty"` + // SSH - Specifies the ssh key configuration for a Linux OS. + SSH *SSHConfiguration `json:"ssh,omitempty"` +} + +// ListUsagesResult the List Usages operation response. +type ListUsagesResult struct { + autorest.Response `json:"-"` + // Value - The list of compute resource usages. + Value *[]Usage `json:"value,omitempty"` + // NextLink - The URI to fetch the next page of compute resource usage information. Call ListNext() with this to fetch the next page of compute resource usage information. + NextLink *string `json:"nextLink,omitempty"` +} + +// ListUsagesResultIterator provides access to a complete listing of Usage values. +type ListUsagesResultIterator struct { + i int + page ListUsagesResultPage +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *ListUsagesResultIterator) Next() error { + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err := iter.page.Next() + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter ListUsagesResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter ListUsagesResultIterator) Response() ListUsagesResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter ListUsagesResultIterator) Value() Usage { + if !iter.page.NotDone() { + return Usage{} + } + return iter.page.Values()[iter.i] +} + +// IsEmpty returns true if the ListResult contains no values. +func (lur ListUsagesResult) IsEmpty() bool { + return lur.Value == nil || len(*lur.Value) == 0 +} + +// listUsagesResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (lur ListUsagesResult) listUsagesResultPreparer() (*http.Request, error) { + if lur.NextLink == nil || len(to.String(lur.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(lur.NextLink))) +} + +// ListUsagesResultPage contains a page of Usage values. +type ListUsagesResultPage struct { + fn func(ListUsagesResult) (ListUsagesResult, error) + lur ListUsagesResult +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *ListUsagesResultPage) Next() error { + next, err := page.fn(page.lur) + if err != nil { + return err + } + page.lur = next + return nil +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page ListUsagesResultPage) NotDone() bool { + return !page.lur.IsEmpty() +} + +// Response returns the raw server response from the last page request. 
+func (page ListUsagesResultPage) Response() ListUsagesResult { + return page.lur +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page ListUsagesResultPage) Values() []Usage { + if page.lur.IsEmpty() { + return nil + } + return *page.lur.Value +} + +// ListVirtualMachineExtensionImage ... +type ListVirtualMachineExtensionImage struct { + autorest.Response `json:"-"` + Value *[]VirtualMachineExtensionImage `json:"value,omitempty"` +} + +// ListVirtualMachineImageResource ... +type ListVirtualMachineImageResource struct { + autorest.Response `json:"-"` + Value *[]VirtualMachineImageResource `json:"value,omitempty"` +} + +// LogAnalyticsExportRequestRateByIntervalFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type LogAnalyticsExportRequestRateByIntervalFuture struct { + azure.Future + req *http.Request +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future LogAnalyticsExportRequestRateByIntervalFuture) Result(client LogAnalyticsClient) (laor LogAnalyticsOperationResult, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + return + } + if !done { + return laor, autorest.NewError("compute.LogAnalyticsExportRequestRateByIntervalFuture", "Result", "asynchronous operation has not completed") + } + if future.PollingMethod() == azure.PollingLocation { + laor, err = client.ExportRequestRateByIntervalResponder(future.Response()) + return + } + var resp *http.Response + resp, err = autorest.SendWithSender(client, autorest.ChangeToGet(future.req), + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if err != nil { + return + } + laor, err = client.ExportRequestRateByIntervalResponder(resp) + return +} + +// LogAnalyticsExportThrottledRequestsFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type LogAnalyticsExportThrottledRequestsFuture struct { + azure.Future + req *http.Request +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future LogAnalyticsExportThrottledRequestsFuture) Result(client LogAnalyticsClient) (laor LogAnalyticsOperationResult, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + return + } + if !done { + return laor, autorest.NewError("compute.LogAnalyticsExportThrottledRequestsFuture", "Result", "asynchronous operation has not completed") + } + if future.PollingMethod() == azure.PollingLocation { + laor, err = client.ExportThrottledRequestsResponder(future.Response()) + return + } + var resp *http.Response + resp, err = autorest.SendWithSender(client, autorest.ChangeToGet(future.req), + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if err != nil { + return + } + laor, err = client.ExportThrottledRequestsResponder(resp) + return +} + +// LogAnalyticsInputBase api input base class for LogAnalytics Api. +type LogAnalyticsInputBase struct { + // BlobContainerSasURI - SAS Uri of the logging blob container to which LogAnalytics Api writes output logs to. 
+ BlobContainerSasURI *string `json:"blobContainerSasUri,omitempty"` + // FromTime - From time of the query + FromTime *date.Time `json:"fromTime,omitempty"` + // ToTime - To time of the query + ToTime *date.Time `json:"toTime,omitempty"` + // GroupByThrottlePolicy - Group query result by Throttle Policy applied. + GroupByThrottlePolicy *bool `json:"groupByThrottlePolicy,omitempty"` + // GroupByOperationName - Group query result by by Operation Name. + GroupByOperationName *bool `json:"groupByOperationName,omitempty"` + // GroupByResourceName - Group query result by Resource Name. + GroupByResourceName *bool `json:"groupByResourceName,omitempty"` +} + +// LogAnalyticsOperationResult logAnalytics operation status response +type LogAnalyticsOperationResult struct { + autorest.Response `json:"-"` + // Name - Operation ID + Name *string `json:"name,omitempty"` + // Status - Operation status + Status *string `json:"status,omitempty"` + // StartTime - Start time of the operation + StartTime *date.Time `json:"startTime,omitempty"` + // EndTime - End time of the operation + EndTime *date.Time `json:"endTime,omitempty"` + // Error - Api error + Error *APIError `json:"error,omitempty"` + // Properties - LogAnalyticsOutput + Properties *LogAnalyticsOutput `json:"properties,omitempty"` +} + +// LogAnalyticsOutput logAnalytics output properties +type LogAnalyticsOutput struct { + // Output - Output file Uri path to blob container. + Output *string `json:"output,omitempty"` +} + +// LongRunningOperationProperties compute-specific operation properties, including output +type LongRunningOperationProperties struct { + // Output - Operation output data (raw JSON) + Output *map[string]interface{} `json:"output,omitempty"` +} + +// MaintenanceRedeployStatus maintenance Operation Status. +type MaintenanceRedeployStatus struct { + // IsCustomerInitiatedMaintenanceAllowed - True, if customer is allowed to perform Maintenance. + IsCustomerInitiatedMaintenanceAllowed *bool `json:"isCustomerInitiatedMaintenanceAllowed,omitempty"` + // PreMaintenanceWindowStartTime - Start Time for the Pre Maintenance Window. + PreMaintenanceWindowStartTime *date.Time `json:"preMaintenanceWindowStartTime,omitempty"` + // PreMaintenanceWindowEndTime - End Time for the Pre Maintenance Window. + PreMaintenanceWindowEndTime *date.Time `json:"preMaintenanceWindowEndTime,omitempty"` + // MaintenanceWindowStartTime - Start Time for the Maintenance Window. + MaintenanceWindowStartTime *date.Time `json:"maintenanceWindowStartTime,omitempty"` + // MaintenanceWindowEndTime - End Time for the Maintenance Window. + MaintenanceWindowEndTime *date.Time `json:"maintenanceWindowEndTime,omitempty"` + // LastOperationResultCode - The Last Maintenance Operation Result Code. Possible values include: 'MaintenanceOperationResultCodeTypesNone', 'MaintenanceOperationResultCodeTypesRetryLater', 'MaintenanceOperationResultCodeTypesMaintenanceAborted', 'MaintenanceOperationResultCodeTypesMaintenanceCompleted' + LastOperationResultCode MaintenanceOperationResultCodeTypes `json:"lastOperationResultCode,omitempty"` + // LastOperationMessage - Message returned for the last Maintenance Operation. + LastOperationMessage *string `json:"lastOperationMessage,omitempty"` +} + +// ManagedDiskParameters the parameters of a managed disk. +type ManagedDiskParameters struct { + // ID - Resource Id + ID *string `json:"id,omitempty"` + // StorageAccountType - Specifies the storage account type for the managed disk. Possible values are: Standard_LRS or Premium_LRS. 
Possible values include: 'StandardLRS', 'PremiumLRS' + StorageAccountType StorageAccountTypes `json:"storageAccountType,omitempty"` +} + +// NetworkInterfaceReference describes a network interface reference. +type NetworkInterfaceReference struct { + // ID - Resource Id + ID *string `json:"id,omitempty"` + *NetworkInterfaceReferenceProperties `json:"properties,omitempty"` +} + +// NetworkInterfaceReferenceProperties describes a network interface reference properties. +type NetworkInterfaceReferenceProperties struct { + // Primary - Specifies the primary network interface in case the virtual machine has more than 1 network interface. + Primary *bool `json:"primary,omitempty"` +} + +// NetworkProfile specifies the network interfaces of the virtual machine. +type NetworkProfile struct { + // NetworkInterfaces - Specifies the list of resource Ids for the network interfaces associated with the virtual machine. + NetworkInterfaces *[]NetworkInterfaceReference `json:"networkInterfaces,omitempty"` +} + +// OperationStatusResponse operation status response +type OperationStatusResponse struct { + autorest.Response `json:"-"` + // Name - Operation ID + Name *string `json:"name,omitempty"` + // Status - Operation status + Status *string `json:"status,omitempty"` + // StartTime - Start time of the operation + StartTime *date.Time `json:"startTime,omitempty"` + // EndTime - End time of the operation + EndTime *date.Time `json:"endTime,omitempty"` + // Error - Api error + Error *APIError `json:"error,omitempty"` +} + +// OSDisk specifies information about the operating system disk used by the virtual machine.

For more information about disks, see [About disks and VHDs for Azure virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-about-disks-vhds?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json).
+type OSDisk struct {
+	// OsType - This property allows you to specify the type of the OS that is included in the disk if creating a VM from user-image or a specialized VHD.

Possible values are:

**Windows**

**Linux**. Possible values include: 'Windows', 'Linux'
+	OsType OperatingSystemTypes `json:"osType,omitempty"`
+	// EncryptionSettings - Specifies the encryption settings for the OS Disk.

Minimum api-version: 2015-06-15
+	EncryptionSettings *DiskEncryptionSettings `json:"encryptionSettings,omitempty"`
+	// Name - The disk name.
+	Name *string `json:"name,omitempty"`
+	// Vhd - The virtual hard disk.
+	Vhd *VirtualHardDisk `json:"vhd,omitempty"`
+	// Image - The source user image virtual hard disk. The virtual hard disk will be copied before being attached to the virtual machine. If SourceImage is provided, the destination virtual hard drive must not exist.
+	Image *VirtualHardDisk `json:"image,omitempty"`
+	// Caching - Specifies the caching requirements.

Possible values are:

**None**

**ReadOnly**

**ReadWrite**

Default: **None for Standard storage. ReadOnly for Premium storage**. Possible values include: 'CachingTypesNone', 'CachingTypesReadOnly', 'CachingTypesReadWrite'
+	Caching CachingTypes `json:"caching,omitempty"`
+	// WriteAcceleratorEnabled - Specifies whether writeAccelerator should be enabled or disabled on the disk.
+	WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty"`
+	// CreateOption - Specifies how the virtual machine should be created.

Possible values are:

**Attach** \u2013 This value is used when you are using a specialized disk to create the virtual machine.

**FromImage** \u2013 This value is used when you are using an image to create the virtual machine. If you are using a platform image, you also use the imageReference element described above. If you are using a marketplace image, you also use the plan element previously described. Possible values include: 'DiskCreateOptionTypesFromImage', 'DiskCreateOptionTypesEmpty', 'DiskCreateOptionTypesAttach'
+	CreateOption DiskCreateOptionTypes `json:"createOption,omitempty"`
+	// DiskSizeGB - Specifies the size of an empty data disk in gigabytes. This element can be used to overwrite the name of the disk in a virtual machine image.

This value cannot be larger than 1023 GB
+	DiskSizeGB *int32 `json:"diskSizeGB,omitempty"`
+	// ManagedDisk - The managed disk parameters.
+	ManagedDisk *ManagedDiskParameters `json:"managedDisk,omitempty"`
+}
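Putting the pieces together, a from-image managed OS disk is a single literal over the types above. A sketch with purely illustrative values (note that DiskSizeGB may not exceed 1023):

```go
package computeexamples

import (
	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
	"github.com/Azure/go-autorest/autorest/to"
)

// exampleOSDisk sketches a managed OS disk for a from-image deployment.
func exampleOSDisk() compute.OSDisk {
	return compute.OSDisk{
		Name:         to.StringPtr("example-osdisk"),
		OsType:       compute.Linux,
		CreateOption: compute.DiskCreateOptionTypesFromImage,
		// Explicit ReadWrite caching; left unset, the default is None on
		// Standard storage and ReadOnly on Premium storage (see above).
		Caching: compute.CachingTypesReadWrite,
		ManagedDisk: &compute.ManagedDiskParameters{
			StorageAccountType: compute.PremiumLRS,
		},
		DiskSizeGB: to.Int32Ptr(128), // cannot be larger than 1023 GB
	}
}
```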

+
+// OSDiskImage contains the os disk image information.
+type OSDiskImage struct {
+	// OperatingSystem - The operating system of the osDiskImage. Possible values include: 'Windows', 'Linux'
+	OperatingSystem OperatingSystemTypes `json:"operatingSystem,omitempty"`
+}
+
+// OSProfile specifies the operating system settings for the virtual machine.
+type OSProfile struct {
+	// ComputerName - Specifies the host OS name of the virtual machine.

**Max-length (Windows):** 15 characters

**Max-length (Linux):** 64 characters.

For naming conventions and restrictions see [Azure infrastructure services implementation guidelines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-infrastructure-subscription-accounts-guidelines?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json#1-naming-conventions).
+	ComputerName *string `json:"computerName,omitempty"`
+	// AdminUsername - Specifies the name of the administrator account.

**Windows-only restriction:** Cannot end in "."

**Disallowed values:** "administrator", "admin", "user", "user1", "test", "user2", "test1", "user3", "admin1", "1", "123", "a", "actuser", "adm", "admin2", "aspnet", "backup", "console", "david", "guest", "john", "owner", "root", "server", "sql", "support", "support_388945a0", "sys", "test2", "test3", "user4", "user5".

**Minimum-length (Linux):** 1 character

**Max-length (Linux):** 64 characters

**Max-length (Windows):** 20 characters

  • For root access to the Linux VM, see [Using root privileges on Linux virtual machines in Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-use-root-privileges?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json)
  • For a list of built-in system users on Linux that should not be used in this field, see [Selecting User Names for Linux on Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-usernames?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json)
+	AdminUsername *string `json:"adminUsername,omitempty"`
+	// AdminPassword - Specifies the password of the administrator account.

    **Minimum-length (Windows):** 8 characters

    **Minimum-length (Linux):** 6 characters

    **Max-length (Windows):** 123 characters

    **Max-length (Linux):** 72 characters

    **Complexity requirements:** 3 out of 4 conditions below need to be fulfilled
    Has lower characters
    Has upper characters
    Has a digit
    Has a special character (Regex match [\W_])

    **Disallowed values:** "abc@123", "P@$$w0rd", "P@ssw0rd", "P@ssword123", "Pa$$word", "pass@word1", "Password!", "Password1", "Password22", "iloveyou!"

    For resetting the password, see [How to reset the Remote Desktop service or its login password in a Windows VM](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-reset-rdp?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json)

    For resetting root password, see [Manage users, SSH, and check or repair disks on Azure Linux VMs using the VMAccess Extension](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-using-vmaccess-extension?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json#reset-root-password)
+	AdminPassword *string `json:"adminPassword,omitempty"`
+	// CustomData - Specifies a base-64 encoded string of custom data. The base-64 encoded string is decoded to a binary array that is saved as a file on the Virtual Machine. The maximum length of the binary array is 65535 bytes.

    For using cloud-init for your VM, see [Using cloud-init to customize a Linux VM during creation](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-using-cloud-init?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json)
+	CustomData *string `json:"customData,omitempty"`
+	// WindowsConfiguration - Specifies Windows operating system settings on the virtual machine.
+	WindowsConfiguration *WindowsConfiguration `json:"windowsConfiguration,omitempty"`
+	// LinuxConfiguration - Specifies the Linux operating system settings on the virtual machine.

    For a list of supported Linux distributions, see [Linux on Azure-Endorsed Distributions](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-endorsed-distros?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json)

For running non-endorsed distributions, see [Information for Non-Endorsed Distributions](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-create-upload-generic?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json).
+	LinuxConfiguration *LinuxConfiguration `json:"linuxConfiguration,omitempty"`
+	// Secrets - Specifies set of certificates that should be installed onto the virtual machine.
+	Secrets *[]VaultSecretGroup `json:"secrets,omitempty"`
+}
+
+// Plan specifies information about the marketplace image used to create the virtual machine. This element is only used
+// for marketplace images. Before you can use a marketplace image from an API, you must enable the image for
+// programmatic use. In the Azure portal, find the marketplace image that you want to use and then click **Want to
+// deploy programmatically, Get Started ->**. Enter any required information and then click **Save**.
+type Plan struct {
+	// Name - The plan ID.
+	Name *string `json:"name,omitempty"`
+	// Publisher - The publisher ID.
+	Publisher *string `json:"publisher,omitempty"`
+	// Product - Specifies the product of the image from the marketplace. This is the same value as Offer under the imageReference element.
+	Product *string `json:"product,omitempty"`
+	// PromotionCode - The promotion code.
+	PromotionCode *string `json:"promotionCode,omitempty"`
+}
+
+// PurchasePlan used for establishing the purchase context of any 3rd Party artifact through MarketPlace.
+type PurchasePlan struct {
+	// Publisher - The publisher ID.
+	Publisher *string `json:"publisher,omitempty"`
+	// Name - The plan ID.
+	Name *string `json:"name,omitempty"`
+	// Product - Specifies the product of the image from the marketplace. This is the same value as Offer under the imageReference element.
+	Product *string `json:"product,omitempty"`
+}
+
+// RecoveryWalkResponse response after calling a manual recovery walk
+type RecoveryWalkResponse struct {
+	autorest.Response `json:"-"`
+	// WalkPerformed - Whether the recovery walk was performed
+	WalkPerformed *bool `json:"walkPerformed,omitempty"`
+	// NextPlatformUpdateDomain - The next update domain that needs to be walked. Null means walk spanning all update domains has been completed
+	NextPlatformUpdateDomain *int32 `json:"nextPlatformUpdateDomain,omitempty"`
+}
+
+// RequestRateByIntervalInput api request input for LogAnalytics getRequestRateByInterval Api.
+type RequestRateByIntervalInput struct {
+	// BlobContainerSasURI - SAS URI of the logging blob container to which the LogAnalytics API writes output logs.
+	BlobContainerSasURI *string `json:"blobContainerSasUri,omitempty"`
+	// FromTime - From time of the query
+	FromTime *date.Time `json:"fromTime,omitempty"`
+	// ToTime - To time of the query
+	ToTime *date.Time `json:"toTime,omitempty"`
+	// GroupByThrottlePolicy - Group query result by Throttle Policy applied.
+	GroupByThrottlePolicy *bool `json:"groupByThrottlePolicy,omitempty"`
+	// GroupByOperationName - Group query result by Operation Name.
+	GroupByOperationName *bool `json:"groupByOperationName,omitempty"`
+	// GroupByResourceName - Group query result by Resource Name.
+	GroupByResourceName *bool `json:"groupByResourceName,omitempty"`
+	// IntervalLength - Interval value in minutes used to create LogAnalytics call rate logs. Possible values include: 'ThreeMins', 'FiveMins', 'ThirtyMins', 'SixtyMins'
+	IntervalLength IntervalInMins `json:"intervalLength,omitempty"`
+}
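Since the struct above is again plain data, a throttling query is a literal. A sketch covering the last 24 hours in 30-minute buckets (the SAS URI is a placeholder, and the IntervalInMins constant name is assumed from the Possible-values list above):

```go
package computeexamples

import (
	"time"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
	"github.com/Azure/go-autorest/autorest/date"
	"github.com/Azure/go-autorest/autorest/to"
)

// exampleThrottlingQuery requests call-rate logs for the past 24 hours,
// grouped by operation name in 30-minute intervals.
func exampleThrottlingQuery() compute.RequestRateByIntervalInput {
	now := time.Now().UTC()
	return compute.RequestRateByIntervalInput{
		// Placeholder SAS URI; a real one must allow writes to the container.
		BlobContainerSasURI:  to.StringPtr("https://examplestorage.blob.core.windows.net/logs?sig=REDACTED"),
		FromTime:             &date.Time{Time: now.Add(-24 * time.Hour)},
		ToTime:               &date.Time{Time: now},
		GroupByOperationName: to.BoolPtr(true),
		IntervalLength:       compute.ThirtyMins,
	}
}
```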
+
+// Resource the Resource model definition.
+type Resource struct {
+	// ID - Resource Id
+	ID *string `json:"id,omitempty"`
+	// Name - Resource name
+	Name *string `json:"name,omitempty"`
+	// Type - Resource type
+	Type *string `json:"type,omitempty"`
+	// Location - Resource location
+	Location *string `json:"location,omitempty"`
+	// Tags - Resource tags
+	Tags *map[string]*string `json:"tags,omitempty"`
+}
+
+// ResourceSku describes an available Compute SKU.
+type ResourceSku struct {
+	// ResourceType - The type of resource the SKU applies to.
+	ResourceType *string `json:"resourceType,omitempty"`
+	// Name - The name of SKU.
+	Name *string `json:"name,omitempty"`
+	// Tier - Specifies the tier of virtual machines in a scale set.

    Possible Values:

    **Standard**

**Basic**
+	Tier *string `json:"tier,omitempty"`
+	// Size - The Size of the SKU.
+	Size *string `json:"size,omitempty"`
+	// Family - The Family of this particular SKU.
+	Family *string `json:"family,omitempty"`
+	// Kind - The Kind of resources that are supported in this SKU.
+	Kind *string `json:"kind,omitempty"`
+	// Capacity - Specifies the number of virtual machines in the scale set.
+	Capacity *ResourceSkuCapacity `json:"capacity,omitempty"`
+	// Locations - The set of locations that the SKU is available.
+	Locations *[]string `json:"locations,omitempty"`
+	// LocationInfo - A list of locations and availability zones in those locations where the SKU is available.
+	LocationInfo *[]ResourceSkuLocationInfo `json:"locationInfo,omitempty"`
+	// APIVersions - The api versions that support this SKU.
+	APIVersions *[]string `json:"apiVersions,omitempty"`
+	// Costs - Metadata for retrieving price info.
+	Costs *[]ResourceSkuCosts `json:"costs,omitempty"`
+	// Capabilities - A name value pair to describe the capability.
+	Capabilities *[]ResourceSkuCapabilities `json:"capabilities,omitempty"`
+	// Restrictions - The restrictions because of which SKU cannot be used. This is empty if there are no restrictions.
+	Restrictions *[]ResourceSkuRestrictions `json:"restrictions,omitempty"`
+}
+
+// ResourceSkuCapabilities describes the SKU capabilities object.
+type ResourceSkuCapabilities struct {
+	// Name - An invariant to describe the feature.
+	Name *string `json:"name,omitempty"`
+	// Value - An invariant if the feature is measured by quantity.
+	Value *string `json:"value,omitempty"`
+}
+
+// ResourceSkuCapacity describes scaling information of a SKU.
+type ResourceSkuCapacity struct {
+	// Minimum - The minimum capacity.
+	Minimum *int64 `json:"minimum,omitempty"`
+	// Maximum - The maximum capacity that can be set.
+	Maximum *int64 `json:"maximum,omitempty"`
+	// Default - The default capacity.
+	Default *int64 `json:"default,omitempty"`
+	// ScaleType - The scale type applicable to the sku. Possible values include: 'ResourceSkuCapacityScaleTypeAutomatic', 'ResourceSkuCapacityScaleTypeManual', 'ResourceSkuCapacityScaleTypeNone'
+	ScaleType ResourceSkuCapacityScaleType `json:"scaleType,omitempty"`
+}
+
+// ResourceSkuCosts describes metadata for retrieving price info.
+type ResourceSkuCosts struct {
+	// MeterID - Used for querying price from commerce.
+	MeterID *string `json:"meterID,omitempty"`
+	// Quantity - The multiplier is needed to extend the base metered cost.
+	Quantity *int64 `json:"quantity,omitempty"`
+	// ExtendedUnit - An invariant to show the extended unit.
+	ExtendedUnit *string `json:"extendedUnit,omitempty"`
+}
+
+// ResourceSkuLocationInfo ...
+type ResourceSkuLocationInfo struct {
+	// Location - Location of the SKU
+	Location *string `json:"location,omitempty"`
+	// Zones - List of availability zones where the SKU is supported.
+	Zones *[]string `json:"zones,omitempty"`
+}
+
+// ResourceSkuRestrictionInfo ...
+type ResourceSkuRestrictionInfo struct {
+	// Locations - Locations where the SKU is restricted
+	Locations *[]string `json:"locations,omitempty"`
+	// Zones - List of availability zones where the SKU is restricted.
+	Zones *[]string `json:"zones,omitempty"`
+}
+
+// ResourceSkuRestrictions describes the restrictions because of which a SKU cannot be used.
+type ResourceSkuRestrictions struct {
+	// Type - The type of restrictions. Possible values include: 'Location', 'Zone'
+	Type ResourceSkuRestrictionsType `json:"type,omitempty"`
+	// Values - The value of restrictions.
If the restriction type is set to location. This would be different locations where the SKU is restricted. + Values *[]string `json:"values,omitempty"` + // RestrictionInfo - The information about the restriction where the SKU cannot be used. + RestrictionInfo *ResourceSkuRestrictionInfo `json:"restrictionInfo,omitempty"` + // ReasonCode - The reason for restriction. Possible values include: 'QuotaID', 'NotAvailableForSubscription' + ReasonCode ResourceSkuRestrictionsReasonCode `json:"reasonCode,omitempty"` +} + +// ResourceSkusResult the Compute List Skus operation response. +type ResourceSkusResult struct { + autorest.Response `json:"-"` + // Value - The list of skus available for the subscription. + Value *[]ResourceSku `json:"value,omitempty"` + // NextLink - The uri to fetch the next page of Compute Skus. Call ListNext() with this to fetch the next page of VMSS Skus. + NextLink *string `json:"nextLink,omitempty"` +} + +// ResourceSkusResultIterator provides access to a complete listing of ResourceSku values. +type ResourceSkusResultIterator struct { + i int + page ResourceSkusResultPage +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *ResourceSkusResultIterator) Next() error { + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err := iter.page.Next() + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter ResourceSkusResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter ResourceSkusResultIterator) Response() ResourceSkusResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter ResourceSkusResultIterator) Value() ResourceSku { + if !iter.page.NotDone() { + return ResourceSku{} + } + return iter.page.Values()[iter.i] +} + +// IsEmpty returns true if the ListResult contains no values. +func (rsr ResourceSkusResult) IsEmpty() bool { + return rsr.Value == nil || len(*rsr.Value) == 0 +} + +// resourceSkusResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (rsr ResourceSkusResult) resourceSkusResultPreparer() (*http.Request, error) { + if rsr.NextLink == nil || len(to.String(rsr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(rsr.NextLink))) +} + +// ResourceSkusResultPage contains a page of ResourceSku values. +type ResourceSkusResultPage struct { + fn func(ResourceSkusResult) (ResourceSkusResult, error) + rsr ResourceSkusResult +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *ResourceSkusResultPage) Next() error { + next, err := page.fn(page.rsr) + if err != nil { + return err + } + page.rsr = next + return nil +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page ResourceSkusResultPage) NotDone() bool { + return !page.rsr.IsEmpty() +} + +// Response returns the raw server response from the last page request. 
+func (page ResourceSkusResultPage) Response() ResourceSkusResult {
+	return page.rsr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page ResourceSkusResultPage) Values() []ResourceSku {
+	if page.rsr.IsEmpty() {
+		return nil
+	}
+	return *page.rsr.Value
+}
+
+// ResourceUpdate the Resource model definition.
+type ResourceUpdate struct {
+	// Tags - Resource tags
+	Tags *map[string]*string `json:"tags,omitempty"`
+	Sku  *DiskSku `json:"sku,omitempty"`
+}
+
+// RollingUpgradePolicy the configuration parameters used while performing a rolling upgrade.
+type RollingUpgradePolicy struct {
+	// MaxBatchInstancePercent - The maximum percent of total virtual machine instances that will be upgraded simultaneously by the rolling upgrade in one batch. As this is a maximum, unhealthy instances in previous or future batches can cause the percentage of instances in a batch to decrease to ensure higher reliability. The default value for this parameter is 20%.
+	MaxBatchInstancePercent *int32 `json:"maxBatchInstancePercent,omitempty"`
+	// MaxUnhealthyInstancePercent - The maximum percentage of the total virtual machine instances in the scale set that can be simultaneously unhealthy, either as a result of being upgraded, or by being found in an unhealthy state by the virtual machine health checks before the rolling upgrade aborts. This constraint will be checked prior to starting any batch. The default value for this parameter is 20%.
+	MaxUnhealthyInstancePercent *int32 `json:"maxUnhealthyInstancePercent,omitempty"`
+	// MaxUnhealthyUpgradedInstancePercent - The maximum percentage of upgraded virtual machine instances that can be found to be in an unhealthy state. This check will happen after each batch is upgraded. If this percentage is ever exceeded, the rolling update aborts. The default value for this parameter is 20%.
+	MaxUnhealthyUpgradedInstancePercent *int32 `json:"maxUnhealthyUpgradedInstancePercent,omitempty"`
+	// PauseTimeBetweenBatches - The wait time between completing the update for all virtual machines in one batch and starting the next batch. The time duration should be specified in ISO 8601 format. The default value is 0 seconds (PT0S).
+	PauseTimeBetweenBatches *string `json:"pauseTimeBetweenBatches,omitempty"`
+}
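The three Max*Percent knobs above all default to 20%, and PauseTimeBetweenBatches takes an ISO 8601 duration. A sketch of a more conservative policy (all values illustrative):

```go
package computeexamples

import (
	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
	"github.com/Azure/go-autorest/autorest/to"
)

// conservativeUpgradePolicy tightens the 20% defaults and pauses five
// minutes ("PT5M" in ISO 8601) between batches.
func conservativeUpgradePolicy() compute.RollingUpgradePolicy {
	return compute.RollingUpgradePolicy{
		MaxBatchInstancePercent:             to.Int32Ptr(10),
		MaxUnhealthyInstancePercent:         to.Int32Ptr(10),
		MaxUnhealthyUpgradedInstancePercent: to.Int32Ptr(5),
		PauseTimeBetweenBatches:             to.StringPtr("PT5M"),
	}
}
```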
+
+// RollingUpgradeProgressInfo information about the number of virtual machine instances in each upgrade state.
+type RollingUpgradeProgressInfo struct {
+	// SuccessfulInstanceCount - The number of instances that have been successfully upgraded.
+	SuccessfulInstanceCount *int32 `json:"successfulInstanceCount,omitempty"`
+	// FailedInstanceCount - The number of instances that have failed to be upgraded successfully.
+	FailedInstanceCount *int32 `json:"failedInstanceCount,omitempty"`
+	// InProgressInstanceCount - The number of instances that are currently being upgraded.
+	InProgressInstanceCount *int32 `json:"inProgressInstanceCount,omitempty"`
+	// PendingInstanceCount - The number of instances that have not yet begun to be upgraded.
+	PendingInstanceCount *int32 `json:"pendingInstanceCount,omitempty"`
+}
+
+// RollingUpgradeRunningStatus information about the current running state of the overall upgrade.
+type RollingUpgradeRunningStatus struct {
+	// Code - Code indicating the current status of the upgrade. Possible values include: 'RollingForward', 'Cancelled', 'Completed', 'Faulted'
+	Code RollingUpgradeStatusCode `json:"code,omitempty"`
+	// StartTime - Start time of the upgrade.
+	StartTime *date.Time `json:"startTime,omitempty"`
+	// LastAction - The last action performed on the rolling upgrade. Possible values include: 'Start', 'Cancel'
+	LastAction RollingUpgradeActionType `json:"lastAction,omitempty"`
+	// LastActionTime - Last action time of the upgrade.
+	LastActionTime *date.Time `json:"lastActionTime,omitempty"`
+}
+
+// RollingUpgradeStatusInfo the status of the latest virtual machine scale set rolling upgrade.
+type RollingUpgradeStatusInfo struct {
+	autorest.Response `json:"-"`
+	// ID - Resource Id
+	ID *string `json:"id,omitempty"`
+	// Name - Resource name
+	Name *string `json:"name,omitempty"`
+	// Type - Resource type
+	Type *string `json:"type,omitempty"`
+	// Location - Resource location
+	Location *string `json:"location,omitempty"`
+	// Tags - Resource tags
+	Tags *map[string]*string `json:"tags,omitempty"`
+	*RollingUpgradeStatusInfoProperties `json:"properties,omitempty"`
+}
+
+// RollingUpgradeStatusInfoProperties the status of the latest virtual machine scale set rolling upgrade.
+type RollingUpgradeStatusInfoProperties struct {
+	// Policy - The rolling upgrade policies applied for this upgrade.
+	Policy *RollingUpgradePolicy `json:"policy,omitempty"`
+	// RunningStatus - Information about the current running state of the overall upgrade.
+	RunningStatus *RollingUpgradeRunningStatus `json:"runningStatus,omitempty"`
+	// Progress - Information about the number of virtual machine instances in each upgrade state.
+	Progress *RollingUpgradeProgressInfo `json:"progress,omitempty"`
+	// Error - Error details for this upgrade, if there are any.
+	Error *APIError `json:"error,omitempty"`
+}
+
+// RunCommandDocument describes the properties of a Run Command.
+type RunCommandDocument struct {
+	autorest.Response `json:"-"`
+	// Schema - The VM run command schema.
+	Schema *string `json:"$schema,omitempty"`
+	// ID - The VM run command id.
+	ID *string `json:"id,omitempty"`
+	// OsType - The Operating System type. Possible values include: 'Windows', 'Linux'
+	OsType OperatingSystemTypes `json:"osType,omitempty"`
+	// Label - The VM run command label.
+	Label *string `json:"label,omitempty"`
+	// Description - The VM run command description.
+	Description *string `json:"description,omitempty"`
+	// Script - The script to be executed.
+	Script *[]string `json:"script,omitempty"`
+	// Parameters - The parameters used by the script.
+	Parameters *[]RunCommandParameterDefinition `json:"parameters,omitempty"`
+}
+
+// RunCommandDocumentBase describes the properties of a Run Command metadata.
+type RunCommandDocumentBase struct {
+	// Schema - The VM run command schema.
+	Schema *string `json:"$schema,omitempty"`
+	// ID - The VM run command id.
+	ID *string `json:"id,omitempty"`
+	// OsType - The Operating System type. Possible values include: 'Windows', 'Linux'
+	OsType OperatingSystemTypes `json:"osType,omitempty"`
+	// Label - The VM run command label.
+	Label *string `json:"label,omitempty"`
+	// Description - The VM run command description.
+	Description *string `json:"description,omitempty"`
+}
+
+// RunCommandInput run command parameters.
+type RunCommandInput struct {
+	// CommandID - The run command id.
+	CommandID *string `json:"commandId,omitempty"`
+	// Script - Optional. The script to be executed. When this value is given, the given script will override the default script of the command.
+	Script *[]string `json:"script,omitempty"`
+	// Parameters - The run command parameters.
+ Parameters *[]RunCommandInputParameter `json:"parameters,omitempty"` +} + +// RunCommandInputParameter describes the properties of a run command parameter. +type RunCommandInputParameter struct { + // Name - The run command parameter name. + Name *string `json:"name,omitempty"` + // Value - The run command parameter value. + Value *string `json:"value,omitempty"` +} + +// RunCommandListResult the List Virtual Machine operation response. +type RunCommandListResult struct { + autorest.Response `json:"-"` + // Value - The list of virtual machine run commands. + Value *[]RunCommandDocumentBase `json:"value,omitempty"` + // NextLink - The uri to fetch the next page of run commands. Call ListNext() with this to fetch the next page of run commands. + NextLink *string `json:"nextLink,omitempty"` +} + +// RunCommandListResultIterator provides access to a complete listing of RunCommandDocumentBase values. +type RunCommandListResultIterator struct { + i int + page RunCommandListResultPage +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *RunCommandListResultIterator) Next() error { + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err := iter.page.Next() + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter RunCommandListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter RunCommandListResultIterator) Response() RunCommandListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter RunCommandListResultIterator) Value() RunCommandDocumentBase { + if !iter.page.NotDone() { + return RunCommandDocumentBase{} + } + return iter.page.Values()[iter.i] +} + +// IsEmpty returns true if the ListResult contains no values. +func (rclr RunCommandListResult) IsEmpty() bool { + return rclr.Value == nil || len(*rclr.Value) == 0 +} + +// runCommandListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (rclr RunCommandListResult) runCommandListResultPreparer() (*http.Request, error) { + if rclr.NextLink == nil || len(to.String(rclr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(rclr.NextLink))) +} + +// RunCommandListResultPage contains a page of RunCommandDocumentBase values. +type RunCommandListResultPage struct { + fn func(RunCommandListResult) (RunCommandListResult, error) + rclr RunCommandListResult +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *RunCommandListResultPage) Next() error { + next, err := page.fn(page.rclr) + if err != nil { + return err + } + page.rclr = next + return nil +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page RunCommandListResultPage) NotDone() bool { + return !page.rclr.IsEmpty() +} + +// Response returns the raw server response from the last page request. 
+func (page RunCommandListResultPage) Response() RunCommandListResult { + return page.rclr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page RunCommandListResultPage) Values() []RunCommandDocumentBase { + if page.rclr.IsEmpty() { + return nil + } + return *page.rclr.Value +} + +// RunCommandParameterDefinition describes the properties of a run command parameter. +type RunCommandParameterDefinition struct { + // Name - The run command parameter name. + Name *string `json:"name,omitempty"` + // Type - The run command parameter type. + Type *string `json:"type,omitempty"` + // DefaultValue - The run command parameter default value. + DefaultValue *string `json:"defaultValue,omitempty"` + // Required - The run command parameter required. + Required *bool `json:"required,omitempty"` +} + +// RunCommandResult run command operation response. +type RunCommandResult struct { + autorest.Response `json:"-"` + // Name - Operation ID + Name *string `json:"name,omitempty"` + // Status - Operation status + Status *string `json:"status,omitempty"` + // StartTime - Start time of the operation + StartTime *date.Time `json:"startTime,omitempty"` + // EndTime - End time of the operation + EndTime *date.Time `json:"endTime,omitempty"` + // Error - Api error + Error *APIError `json:"error,omitempty"` + *RunCommandResultProperties `json:"properties,omitempty"` +} + +// RunCommandResultProperties compute-specific operation properties, including output +type RunCommandResultProperties struct { + // Output - Operation output data (raw JSON) + Output *map[string]interface{} `json:"output,omitempty"` +} + +// Sku describes a virtual machine scale set sku. +type Sku struct { + // Name - The sku name. + Name *string `json:"name,omitempty"` + // Tier - Specifies the tier of virtual machines in a scale set.
    Possible Values:
    **Standard**
    **Basic** + Tier *string `json:"tier,omitempty"` + // Capacity - Specifies the number of virtual machines in the scale set. + Capacity *int64 `json:"capacity,omitempty"` +} + +// Snapshot snapshot resource. +type Snapshot struct { + autorest.Response `json:"-"` + // ID - Resource Id + ID *string `json:"id,omitempty"` + // Name - Resource name + Name *string `json:"name,omitempty"` + // Type - Resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` + // Tags - Resource tags + Tags *map[string]*string `json:"tags,omitempty"` + // ManagedBy - Unused. Always Null. + ManagedBy *string `json:"managedBy,omitempty"` + Sku *DiskSku `json:"sku,omitempty"` + *DiskProperties `json:"properties,omitempty"` +} + +// SnapshotList the List Snapshots operation response. +type SnapshotList struct { + autorest.Response `json:"-"` + // Value - A list of snapshots. + Value *[]Snapshot `json:"value,omitempty"` + // NextLink - The uri to fetch the next page of snapshots. Call ListNext() with this to fetch the next page of snapshots. + NextLink *string `json:"nextLink,omitempty"` +} + +// SnapshotListIterator provides access to a complete listing of Snapshot values. +type SnapshotListIterator struct { + i int + page SnapshotListPage +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *SnapshotListIterator) Next() error { + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err := iter.page.Next() + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter SnapshotListIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter SnapshotListIterator) Response() SnapshotList { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter SnapshotListIterator) Value() Snapshot { + if !iter.page.NotDone() { + return Snapshot{} + } + return iter.page.Values()[iter.i] +} + +// IsEmpty returns true if the ListResult contains no values. +func (sl SnapshotList) IsEmpty() bool { + return sl.Value == nil || len(*sl.Value) == 0 +} + +// snapshotListPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (sl SnapshotList) snapshotListPreparer() (*http.Request, error) { + if sl.NextLink == nil || len(to.String(sl.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(sl.NextLink))) +} + +// SnapshotListPage contains a page of Snapshot values. +type SnapshotListPage struct { + fn func(SnapshotList) (SnapshotList, error) + sl SnapshotList +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *SnapshotListPage) Next() error { + next, err := page.fn(page.sl) + if err != nil { + return err + } + page.sl = next + return nil +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. 
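
// Example (illustrative sketch, not part of the vendored file): draining the
// SnapshotListIterator defined above. How the iterator is first obtained (a
// paged List call on SnapshotsClient) is assumed and outside this sketch.
func snapshotNames(iter SnapshotListIterator) ([]string, error) {
	var names []string
	for iter.NotDone() {
		if s := iter.Value(); s.Name != nil {
			names = append(names, *s.Name)
		}
		// Next advances within the current page and transparently fetches
		// the following page once this one is exhausted.
		if err := iter.Next(); err != nil {
			return names, err
		}
	}
	return names, nil
}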
+func (page SnapshotListPage) NotDone() bool { + return !page.sl.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page SnapshotListPage) Response() SnapshotList { + return page.sl +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page SnapshotListPage) Values() []Snapshot { + if page.sl.IsEmpty() { + return nil + } + return *page.sl.Value +} + +// SnapshotsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running operation. +type SnapshotsCreateOrUpdateFuture struct { + azure.Future + req *http.Request +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future SnapshotsCreateOrUpdateFuture) Result(client SnapshotsClient) (s Snapshot, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + return + } + if !done { + return s, autorest.NewError("compute.SnapshotsCreateOrUpdateFuture", "Result", "asynchronous operation has not completed") + } + if future.PollingMethod() == azure.PollingLocation { + s, err = client.CreateOrUpdateResponder(future.Response()) + return + } + var resp *http.Response + resp, err = autorest.SendWithSender(client, autorest.ChangeToGet(future.req), + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if err != nil { + return + } + s, err = client.CreateOrUpdateResponder(resp) + return +} + +// SnapshotsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running operation. +type SnapshotsDeleteFuture struct { + azure.Future + req *http.Request +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future SnapshotsDeleteFuture) Result(client SnapshotsClient) (osr OperationStatusResponse, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + return + } + if !done { + return osr, autorest.NewError("compute.SnapshotsDeleteFuture", "Result", "asynchronous operation has not completed") + } + if future.PollingMethod() == azure.PollingLocation { + osr, err = client.DeleteResponder(future.Response()) + return + } + var resp *http.Response + resp, err = autorest.SendWithSender(client, autorest.ChangeToGet(future.req), + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if err != nil { + return + } + osr, err = client.DeleteResponder(resp) + return +} + +// SnapshotsGrantAccessFuture an abstraction for monitoring and retrieving the results of a long-running operation. +type SnapshotsGrantAccessFuture struct { + azure.Future + req *http.Request +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
+func (future SnapshotsGrantAccessFuture) Result(client SnapshotsClient) (au AccessURI, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + return + } + if !done { + return au, autorest.NewError("compute.SnapshotsGrantAccessFuture", "Result", "asynchronous operation has not completed") + } + if future.PollingMethod() == azure.PollingLocation { + au, err = client.GrantAccessResponder(future.Response()) + return + } + var resp *http.Response + resp, err = autorest.SendWithSender(client, autorest.ChangeToGet(future.req), + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if err != nil { + return + } + au, err = client.GrantAccessResponder(resp) + return +} + +// SnapshotsRevokeAccessFuture an abstraction for monitoring and retrieving the results of a long-running operation. +type SnapshotsRevokeAccessFuture struct { + azure.Future + req *http.Request +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future SnapshotsRevokeAccessFuture) Result(client SnapshotsClient) (osr OperationStatusResponse, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + return + } + if !done { + return osr, autorest.NewError("compute.SnapshotsRevokeAccessFuture", "Result", "asynchronous operation has not completed") + } + if future.PollingMethod() == azure.PollingLocation { + osr, err = client.RevokeAccessResponder(future.Response()) + return + } + var resp *http.Response + resp, err = autorest.SendWithSender(client, autorest.ChangeToGet(future.req), + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if err != nil { + return + } + osr, err = client.RevokeAccessResponder(resp) + return +} + +// SnapshotsUpdateFuture an abstraction for monitoring and retrieving the results of a long-running operation. +type SnapshotsUpdateFuture struct { + azure.Future + req *http.Request +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future SnapshotsUpdateFuture) Result(client SnapshotsClient) (s Snapshot, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + return + } + if !done { + return s, autorest.NewError("compute.SnapshotsUpdateFuture", "Result", "asynchronous operation has not completed") + } + if future.PollingMethod() == azure.PollingLocation { + s, err = client.UpdateResponder(future.Response()) + return + } + var resp *http.Response + resp, err = autorest.SendWithSender(client, autorest.ChangeToGet(future.req), + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if err != nil { + return + } + s, err = client.UpdateResponder(resp) + return +} + +// SnapshotUpdate snapshot update resource. 
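
// Example (illustrative sketch, not part of the vendored file): blocking on
// one of the snapshot futures above using only the Done and Result methods
// from this file. A real caller would sleep between polls, honouring the
// delay the service advertises; the backoff is elided to keep this minimal.
func waitForSnapshot(client SnapshotsClient, future SnapshotsCreateOrUpdateFuture) (Snapshot, error) {
	for {
		done, err := future.Done(client)
		if err != nil {
			return Snapshot{}, err
		}
		if done {
			// Result re-issues a GET where needed and decodes the Snapshot.
			return future.Result(client)
		}
	}
}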
+type SnapshotUpdate struct { + // Tags - Resource tags + Tags *map[string]*string `json:"tags,omitempty"` + Sku *DiskSku `json:"sku,omitempty"` + *DiskUpdateProperties `json:"properties,omitempty"` +} + +// SourceVault the vault id is an Azure Resource Manager Resource id in the form +// /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName} +type SourceVault struct { + // ID - Resource Id + ID *string `json:"id,omitempty"` +} + +// SSHConfiguration SSH configuration for Linux based VMs running on Azure +type SSHConfiguration struct { + // PublicKeys - The list of SSH public keys used to authenticate with linux based VMs. + PublicKeys *[]SSHPublicKey `json:"publicKeys,omitempty"` +} + +// SSHPublicKey contains information about SSH certificate public key and the path on the Linux VM where the public key +// is placed. +type SSHPublicKey struct { + // Path - Specifies the full path on the created VM where ssh public key is stored. If the file already exists, the specified key is appended to the file. Example: /home/user/.ssh/authorized_keys + Path *string `json:"path,omitempty"` + // KeyData - SSH public key certificate used to authenticate with the VM through ssh. The key needs to be at least 2048-bit and in ssh-rsa format.
    For creating ssh keys, see [Create SSH keys on Linux and Mac for Linux VMs in Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-mac-create-ssh-keys?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json). + KeyData *string `json:"keyData,omitempty"` +} + +// StorageProfile specifies the storage settings for the virtual machine disks. +type StorageProfile struct { + // ImageReference - Specifies information about the image to use. You can specify information about platform images, marketplace images, or virtual machine images. This element is required when you want to use a platform image, marketplace image, or virtual machine image, but is not used in other creation operations. + ImageReference *ImageReference `json:"imageReference,omitempty"` + // OsDisk - Specifies information about the operating system disk used by the virtual machine.
    For more information about disks, see [About disks and VHDs for Azure virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-about-disks-vhds?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json). + OsDisk *OSDisk `json:"osDisk,omitempty"` + // DataDisks - Specifies the parameters that are used to add a data disk to a virtual machine.
    For more information about disks, see [About disks and VHDs for Azure virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-about-disks-vhds?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json). + DataDisks *[]DataDisk `json:"dataDisks,omitempty"` +} + +// SubResource ... +type SubResource struct { + // ID - Resource Id + ID *string `json:"id,omitempty"` +} + +// SubResourceReadOnly ... +type SubResourceReadOnly struct { + // ID - Resource Id + ID *string `json:"id,omitempty"` +} + +// ThrottledRequestsInput api request input for LogAnalytics getThrottledRequests Api. +type ThrottledRequestsInput struct { + // BlobContainerSasURI - SAS Uri of the logging blob container to which LogAnalytics Api writes output logs to. + BlobContainerSasURI *string `json:"blobContainerSasUri,omitempty"` + // FromTime - From time of the query + FromTime *date.Time `json:"fromTime,omitempty"` + // ToTime - To time of the query + ToTime *date.Time `json:"toTime,omitempty"` + // GroupByThrottlePolicy - Group query result by Throttle Policy applied. + GroupByThrottlePolicy *bool `json:"groupByThrottlePolicy,omitempty"` + // GroupByOperationName - Group query result by by Operation Name. + GroupByOperationName *bool `json:"groupByOperationName,omitempty"` + // GroupByResourceName - Group query result by Resource Name. + GroupByResourceName *bool `json:"groupByResourceName,omitempty"` +} + +// UpdateResource the Update Resource model definition. +type UpdateResource struct { + // Tags - Resource tags + Tags *map[string]*string `json:"tags,omitempty"` +} + +// UpgradePolicy describes an upgrade policy - automatic, manual, or rolling. +type UpgradePolicy struct { + // Mode - Specifies the mode of an upgrade to virtual machines in the scale set.
    Possible values are:
    **Manual** - You control the application of updates to virtual machines in the scale set. You do this by using the manualUpgrade action.
    **Automatic** - All virtual machines in the scale set are automatically updated at the same time. Possible values include: 'Automatic', 'Manual', 'Rolling' + Mode UpgradeMode `json:"mode,omitempty"` + // RollingUpgradePolicy - The configuration parameters used while performing a rolling upgrade. + RollingUpgradePolicy *RollingUpgradePolicy `json:"rollingUpgradePolicy,omitempty"` + // AutomaticOSUpgrade - Whether OS upgrades should automatically be applied to scale set instances in a rolling fashion when a newer version of the image becomes available. + AutomaticOSUpgrade *bool `json:"automaticOSUpgrade,omitempty"` +} + +// Usage describes Compute Resource Usage. +type Usage struct { + // Unit - An enum describing the unit of usage measurement. + Unit *string `json:"unit,omitempty"` + // CurrentValue - The current usage of the resource. + CurrentValue *int32 `json:"currentValue,omitempty"` + // Limit - The maximum permitted usage of the resource. + Limit *int64 `json:"limit,omitempty"` + // Name - The name of the type of usage. + Name *UsageName `json:"name,omitempty"` +} + +// UsageName the Usage Names. +type UsageName struct { + // Value - The name of the resource. + Value *string `json:"value,omitempty"` + // LocalizedValue - The localized name of the resource. + LocalizedValue *string `json:"localizedValue,omitempty"` +} + +// VaultCertificate describes a single certificate reference in a Key Vault, and where the certificate should reside on +// the VM. +type VaultCertificate struct { + // CertificateURL - This is the URL of a certificate that has been uploaded to Key Vault as a secret. For adding a secret to the Key Vault, see [Add a key or secret to the key vault](https://docs.microsoft.com/azure/key-vault/key-vault-get-started/#add). In this case, your certificate needs to be It is the Base64 encoding of the following JSON Object which is encoded in UTF-8:

    {
    "data":"<Base64-encoded-certificate>",
    "dataType":"pfx",
    "password":"<pfx-file-password>"
    } + CertificateURL *string `json:"certificateUrl,omitempty"` + // CertificateStore - For Windows VMs, specifies the certificate store on the Virtual Machine to which the certificate should be added. The specified certificate store is implicitly in the LocalMachine account.
    For Linux VMs, the certificate file is placed under the /var/lib/waagent directory, with the file name .crt for the X509 certificate file and .prv for private key. Both of these files are .pem formatted. + CertificateStore *string `json:"certificateStore,omitempty"` +} + +// VaultSecretGroup describes a set of certificates which are all in the same Key Vault. +type VaultSecretGroup struct { + // SourceVault - The relative URL of the Key Vault containing all of the certificates in VaultCertificates. + SourceVault *SubResource `json:"sourceVault,omitempty"` + // VaultCertificates - The list of key vault references in SourceVault which contain certificates. + VaultCertificates *[]VaultCertificate `json:"vaultCertificates,omitempty"` +} + +// VirtualHardDisk describes the uri of a disk. +type VirtualHardDisk struct { + // URI - Specifies the virtual hard disk's uri. + URI *string `json:"uri,omitempty"` +} + +// VirtualMachine describes a Virtual Machine. +type VirtualMachine struct { + autorest.Response `json:"-"` + // ID - Resource Id + ID *string `json:"id,omitempty"` + // Name - Resource name + Name *string `json:"name,omitempty"` + // Type - Resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` + // Tags - Resource tags + Tags *map[string]*string `json:"tags,omitempty"` + // Plan - Specifies information about the marketplace image used to create the virtual machine. This element is only used for marketplace images. Before you can use a marketplace image from an API, you must enable the image for programmatic use. In the Azure portal, find the marketplace image that you want to use and then click **Want to deploy programmatically, Get Started ->**. Enter any required information and then click **Save**. + Plan *Plan `json:"plan,omitempty"` + *VirtualMachineProperties `json:"properties,omitempty"` + // Resources - The virtual machine child extension resources. + Resources *[]VirtualMachineExtension `json:"resources,omitempty"` + // Identity - The identity of the virtual machine, if configured. + Identity *VirtualMachineIdentity `json:"identity,omitempty"` + // Zones - The virtual machine zones. + Zones *[]string `json:"zones,omitempty"` +} + +// VirtualMachineAgentInstanceView the instance view of the VM Agent running on the virtual machine. +type VirtualMachineAgentInstanceView struct { + // VMAgentVersion - The VM Agent full version. + VMAgentVersion *string `json:"vmAgentVersion,omitempty"` + // ExtensionHandlers - The virtual machine extension handler instance view. + ExtensionHandlers *[]VirtualMachineExtensionHandlerInstanceView `json:"extensionHandlers,omitempty"` + // Statuses - The resource status information. + Statuses *[]InstanceViewStatus `json:"statuses,omitempty"` +} + +// VirtualMachineCaptureParameters capture Virtual Machine parameters. +type VirtualMachineCaptureParameters struct { + // VhdPrefix - The captured virtual hard disk's name prefix. + VhdPrefix *string `json:"vhdPrefix,omitempty"` + // DestinationContainerName - The destination container name. + DestinationContainerName *string `json:"destinationContainerName,omitempty"` + // OverwriteVhds - Specifies whether to overwrite the destination virtual hard disk, in case of conflict. + OverwriteVhds *bool `json:"overwriteVhds,omitempty"` +} + +// VirtualMachineCaptureResult resource Id. 
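
// Example (illustrative sketch, not part of the vendored file): wiring a Key
// Vault certificate into a VM definition with the VaultSecretGroup and
// VaultCertificate types above. All identifiers in braces are placeholders.
func exampleSecrets() []VaultSecretGroup {
	return []VaultSecretGroup{{
		SourceVault: &SubResource{
			ID: to.StringPtr("/subscriptions/{subscriptionId}/resourceGroups/{rg}/providers/Microsoft.KeyVault/vaults/{vaultName}"),
		},
		VaultCertificates: &[]VaultCertificate{{
			CertificateURL: to.StringPtr("https://{vaultName}.vault.azure.net/secrets/{secretName}/{version}"),
			// Only consulted for Windows VMs; Linux VMs place the files
			// under /var/lib/waagent as described above.
			CertificateStore: to.StringPtr("My"),
		}},
	}}
}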
+type VirtualMachineCaptureResult struct { + autorest.Response `json:"-"` + // ID - Resource Id + ID *string `json:"id,omitempty"` + *VirtualMachineCaptureResultProperties `json:"properties,omitempty"` +} + +// VirtualMachineCaptureResultProperties compute-specific operation properties, including output +type VirtualMachineCaptureResultProperties struct { + // Output - Operation output data (raw JSON) + Output *map[string]interface{} `json:"output,omitempty"` +} + +// VirtualMachineExtension describes a Virtual Machine Extension. +type VirtualMachineExtension struct { + autorest.Response `json:"-"` + // ID - Resource Id + ID *string `json:"id,omitempty"` + // Name - Resource name + Name *string `json:"name,omitempty"` + // Type - Resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` + // Tags - Resource tags + Tags *map[string]*string `json:"tags,omitempty"` + *VirtualMachineExtensionProperties `json:"properties,omitempty"` +} + +// VirtualMachineExtensionHandlerInstanceView the instance view of a virtual machine extension handler. +type VirtualMachineExtensionHandlerInstanceView struct { + // Type - Specifies the type of the extension; an example is "CustomScriptExtension". + Type *string `json:"type,omitempty"` + // TypeHandlerVersion - Specifies the version of the script handler. + TypeHandlerVersion *string `json:"typeHandlerVersion,omitempty"` + // Status - The extension handler status. + Status *InstanceViewStatus `json:"status,omitempty"` +} + +// VirtualMachineExtensionImage describes a Virtual Machine Extension Image. +type VirtualMachineExtensionImage struct { + autorest.Response `json:"-"` + // ID - Resource Id + ID *string `json:"id,omitempty"` + // Name - Resource name + Name *string `json:"name,omitempty"` + // Type - Resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` + // Tags - Resource tags + Tags *map[string]*string `json:"tags,omitempty"` + *VirtualMachineExtensionImageProperties `json:"properties,omitempty"` +} + +// VirtualMachineExtensionImageProperties describes the properties of a Virtual Machine Extension Image. +type VirtualMachineExtensionImageProperties struct { + // OperatingSystem - The operating system this extension supports. + OperatingSystem *string `json:"operatingSystem,omitempty"` + // ComputeRole - The type of role (IaaS or PaaS) this extension supports. + ComputeRole *string `json:"computeRole,omitempty"` + // HandlerSchema - The schema defined by publisher, where extension consumers should provide settings in a matching schema. + HandlerSchema *string `json:"handlerSchema,omitempty"` + // VMScaleSetEnabled - Whether the extension can be used on xRP VMScaleSets. By default existing extensions are usable on scalesets, but there might be cases where a publisher wants to explicitly indicate the extension is only enabled for CRP VMs but not VMSS. + VMScaleSetEnabled *bool `json:"vmScaleSetEnabled,omitempty"` + // SupportsMultipleExtensions - Whether the handler can support multiple extensions. + SupportsMultipleExtensions *bool `json:"supportsMultipleExtensions,omitempty"` +} + +// VirtualMachineExtensionInstanceView the instance view of a virtual machine extension. +type VirtualMachineExtensionInstanceView struct { + // Name - The virtual machine extension name. + Name *string `json:"name,omitempty"` + // Type - Specifies the type of the extension; an example is "CustomScriptExtension". 
+ Type *string `json:"type,omitempty"` + // TypeHandlerVersion - Specifies the version of the script handler. + TypeHandlerVersion *string `json:"typeHandlerVersion,omitempty"` + // Substatuses - The resource status information. + Substatuses *[]InstanceViewStatus `json:"substatuses,omitempty"` + // Statuses - The resource status information. + Statuses *[]InstanceViewStatus `json:"statuses,omitempty"` +} + +// VirtualMachineExtensionProperties describes the properties of a Virtual Machine Extension. +type VirtualMachineExtensionProperties struct { + // ForceUpdateTag - How the extension handler should be forced to update even if the extension configuration has not changed. + ForceUpdateTag *string `json:"forceUpdateTag,omitempty"` + // Publisher - The name of the extension handler publisher. + Publisher *string `json:"publisher,omitempty"` + // Type - Specifies the type of the extension; an example is "CustomScriptExtension". + Type *string `json:"type,omitempty"` + // TypeHandlerVersion - Specifies the version of the script handler. + TypeHandlerVersion *string `json:"typeHandlerVersion,omitempty"` + // AutoUpgradeMinorVersion - Indicates whether the extension should use a newer minor version if one is available at deployment time. Once deployed, however, the extension will not upgrade minor versions unless redeployed, even with this property set to true. + AutoUpgradeMinorVersion *bool `json:"autoUpgradeMinorVersion,omitempty"` + // Settings - Json formatted public settings for the extension. + Settings *map[string]interface{} `json:"settings,omitempty"` + // ProtectedSettings - The extension can contain either protectedSettings or protectedSettingsFromKeyVault or no protected settings at all. + ProtectedSettings *map[string]interface{} `json:"protectedSettings,omitempty"` + // ProvisioningState - The provisioning state, which only appears in the response. + ProvisioningState *string `json:"provisioningState,omitempty"` + // InstanceView - The virtual machine extension instance view. + InstanceView *VirtualMachineExtensionInstanceView `json:"instanceView,omitempty"` +} + +// VirtualMachineExtensionsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type VirtualMachineExtensionsCreateOrUpdateFuture struct { + azure.Future + req *http.Request +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future VirtualMachineExtensionsCreateOrUpdateFuture) Result(client VirtualMachineExtensionsClient) (vme VirtualMachineExtension, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + return + } + if !done { + return vme, autorest.NewError("compute.VirtualMachineExtensionsCreateOrUpdateFuture", "Result", "asynchronous operation has not completed") + } + if future.PollingMethod() == azure.PollingLocation { + vme, err = client.CreateOrUpdateResponder(future.Response()) + return + } + var resp *http.Response + resp, err = autorest.SendWithSender(client, autorest.ChangeToGet(future.req), + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if err != nil { + return + } + vme, err = client.CreateOrUpdateResponder(resp) + return +} + +// VirtualMachineExtensionsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. 
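
// Example (illustrative sketch, not part of the vendored file): a
// VirtualMachineExtension populated for a custom-script style handler. The
// publisher/type/version triple and the settings keys follow the well-known
// Linux CustomScript extension but should be treated as placeholders here.
func exampleExtension(location string) VirtualMachineExtension {
	settings := map[string]interface{}{
		"commandToExecute": "echo done",
	}
	return VirtualMachineExtension{
		Location: to.StringPtr(location),
		VirtualMachineExtensionProperties: &VirtualMachineExtensionProperties{
			Publisher:               to.StringPtr("Microsoft.Azure.Extensions"),
			Type:                    to.StringPtr("CustomScript"),
			TypeHandlerVersion:      to.StringPtr("2.0"),
			AutoUpgradeMinorVersion: to.BoolPtr(true),
			// Free-form JSON public settings; secrets belong in
			// ProtectedSettings instead.
			Settings: &settings,
		},
	}
}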
+type VirtualMachineExtensionsDeleteFuture struct { + azure.Future + req *http.Request +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future VirtualMachineExtensionsDeleteFuture) Result(client VirtualMachineExtensionsClient) (osr OperationStatusResponse, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + return + } + if !done { + return osr, autorest.NewError("compute.VirtualMachineExtensionsDeleteFuture", "Result", "asynchronous operation has not completed") + } + if future.PollingMethod() == azure.PollingLocation { + osr, err = client.DeleteResponder(future.Response()) + return + } + var resp *http.Response + resp, err = autorest.SendWithSender(client, autorest.ChangeToGet(future.req), + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if err != nil { + return + } + osr, err = client.DeleteResponder(resp) + return +} + +// VirtualMachineHealthStatus the health status of the VM. +type VirtualMachineHealthStatus struct { + // Status - The health status information for the VM. + Status *InstanceViewStatus `json:"status,omitempty"` +} + +// VirtualMachineIdentity identity for the virtual machine. +type VirtualMachineIdentity struct { + // PrincipalID - The principal id of virtual machine identity. This property will only be provided for a system assigned identity. + PrincipalID *string `json:"principalId,omitempty"` + // TenantID - The tenant id associated with the virtual machine. This property will only be provided for a system assigned identity. + TenantID *string `json:"tenantId,omitempty"` + // Type - The type of identity used for the virtual machine. The type 'SystemAssigned, UserAssigned' includes both an implicitly created identity and a set of user assigned identities. The type 'None' will remove any identities from the virtual machine. Possible values include: 'ResourceIdentityTypeSystemAssigned', 'ResourceIdentityTypeUserAssigned', 'ResourceIdentityTypeSystemAssignedUserAssigned', 'ResourceIdentityTypeNone' + Type ResourceIdentityType `json:"type,omitempty"` + // IdentityIds - The list of user identities associated with the Virtual Machine. The user identity references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/identities/{identityName}'. + IdentityIds *[]string `json:"identityIds,omitempty"` +} + +// VirtualMachineImage describes a Virtual Machine Image. +type VirtualMachineImage struct { + autorest.Response `json:"-"` + // ID - Resource Id + ID *string `json:"id,omitempty"` + // Name - The name of the resource. + Name *string `json:"name,omitempty"` + // Location - The supported Azure location of the resource. + Location *string `json:"location,omitempty"` + // Tags - Specifies the tags that are assigned to the virtual machine. For more information about using tags, see [Using tags to organize your Azure resources](https://docs.microsoft.com/azure/azure-resource-manager/resource-group-using-tags.md). + Tags *map[string]*string `json:"tags,omitempty"` + *VirtualMachineImageProperties `json:"properties,omitempty"` +} + +// VirtualMachineImageProperties describes the properties of a Virtual Machine Image. 
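
// Example (illustrative sketch, not part of the vendored file): requesting a
// system-assigned managed identity via the VirtualMachineIdentity type above.
// With ResourceIdentityTypeSystemAssigned, PrincipalID and TenantID are
// populated by the platform in responses and are never set by the caller.
func exampleIdentity() *VirtualMachineIdentity {
	return &VirtualMachineIdentity{Type: ResourceIdentityTypeSystemAssigned}
}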
+type VirtualMachineImageProperties struct { + Plan *PurchasePlan `json:"plan,omitempty"` + OsDiskImage *OSDiskImage `json:"osDiskImage,omitempty"` + DataDiskImages *[]DataDiskImage `json:"dataDiskImages,omitempty"` +} + +// VirtualMachineImageResource virtual machine image resource information. +type VirtualMachineImageResource struct { + // ID - Resource Id + ID *string `json:"id,omitempty"` + // Name - The name of the resource. + Name *string `json:"name,omitempty"` + // Location - The supported Azure location of the resource. + Location *string `json:"location,omitempty"` + // Tags - Specifies the tags that are assigned to the virtual machine. For more information about using tags, see [Using tags to organize your Azure resources](https://docs.microsoft.com/azure/azure-resource-manager/resource-group-using-tags.md). + Tags *map[string]*string `json:"tags,omitempty"` +} + +// VirtualMachineInstanceView the instance view of a virtual machine. +type VirtualMachineInstanceView struct { + autorest.Response `json:"-"` + // PlatformUpdateDomain - Specifies the update domain of the virtual machine. + PlatformUpdateDomain *int32 `json:"platformUpdateDomain,omitempty"` + // PlatformFaultDomain - Specifies the fault domain of the virtual machine. + PlatformFaultDomain *int32 `json:"platformFaultDomain,omitempty"` + // ComputerName - The computer name assigned to the virtual machine. + ComputerName *string `json:"computerName,omitempty"` + // OsName - The Operating System running on the virtual machine. + OsName *string `json:"osName,omitempty"` + // OsVersion - The version of Operating System running on the virtual machine. + OsVersion *string `json:"osVersion,omitempty"` + // RdpThumbPrint - The Remote desktop certificate thumbprint. + RdpThumbPrint *string `json:"rdpThumbPrint,omitempty"` + // VMAgent - The VM Agent running on the virtual machine. + VMAgent *VirtualMachineAgentInstanceView `json:"vmAgent,omitempty"` + // MaintenanceRedeployStatus - The Maintenance Operation status on the virtual machine. + MaintenanceRedeployStatus *MaintenanceRedeployStatus `json:"maintenanceRedeployStatus,omitempty"` + // Disks - The virtual machine disk information. + Disks *[]DiskInstanceView `json:"disks,omitempty"` + // Extensions - The extensions information. + Extensions *[]VirtualMachineExtensionInstanceView `json:"extensions,omitempty"` + // BootDiagnostics - Boot Diagnostics is a debugging feature which allows you to view Console Output and Screenshot to diagnose VM status.
    For Linux Virtual Machines, you can easily view the output of your console log.
    For both Windows and Linux virtual machines, Azure also enables you to see a screenshot of the VM from the hypervisor. + BootDiagnostics *BootDiagnosticsInstanceView `json:"bootDiagnostics,omitempty"` + // Statuses - The resource status information. + Statuses *[]InstanceViewStatus `json:"statuses,omitempty"` +} + +// VirtualMachineListResult the List Virtual Machine operation response. +type VirtualMachineListResult struct { + autorest.Response `json:"-"` + // Value - The list of virtual machines. + Value *[]VirtualMachine `json:"value,omitempty"` + // NextLink - The URI to fetch the next page of VMs. Call ListNext() with this URI to fetch the next page of Virtual Machines. + NextLink *string `json:"nextLink,omitempty"` +} + +// VirtualMachineListResultIterator provides access to a complete listing of VirtualMachine values. +type VirtualMachineListResultIterator struct { + i int + page VirtualMachineListResultPage +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *VirtualMachineListResultIterator) Next() error { + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err := iter.page.Next() + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter VirtualMachineListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter VirtualMachineListResultIterator) Response() VirtualMachineListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter VirtualMachineListResultIterator) Value() VirtualMachine { + if !iter.page.NotDone() { + return VirtualMachine{} + } + return iter.page.Values()[iter.i] +} + +// IsEmpty returns true if the ListResult contains no values. +func (vmlr VirtualMachineListResult) IsEmpty() bool { + return vmlr.Value == nil || len(*vmlr.Value) == 0 +} + +// virtualMachineListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (vmlr VirtualMachineListResult) virtualMachineListResultPreparer() (*http.Request, error) { + if vmlr.NextLink == nil || len(to.String(vmlr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(vmlr.NextLink))) +} + +// VirtualMachineListResultPage contains a page of VirtualMachine values. +type VirtualMachineListResultPage struct { + fn func(VirtualMachineListResult) (VirtualMachineListResult, error) + vmlr VirtualMachineListResult +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *VirtualMachineListResultPage) Next() error { + next, err := page.fn(page.vmlr) + if err != nil { + return err + } + page.vmlr = next + return nil +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page VirtualMachineListResultPage) NotDone() bool { + return !page.vmlr.IsEmpty() +} + +// Response returns the raw server response from the last page request. 
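
// Example (illustrative sketch, not part of the vendored file): walking
// VirtualMachineListResult pages directly rather than via the iterator. The
// initial page would come from a paged List call on the virtual machines
// client, which is assumed here.
func countVMs(page VirtualMachineListResultPage) (int, error) {
	n := 0
	for page.NotDone() {
		n += len(page.Values())
		if err := page.Next(); err != nil {
			return n, err
		}
	}
	return n, nil
}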
+func (page VirtualMachineListResultPage) Response() VirtualMachineListResult { + return page.vmlr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page VirtualMachineListResultPage) Values() []VirtualMachine { + if page.vmlr.IsEmpty() { + return nil + } + return *page.vmlr.Value +} + +// VirtualMachineProperties describes the properties of a Virtual Machine. +type VirtualMachineProperties struct { + // HardwareProfile - Specifies the hardware settings for the virtual machine. + HardwareProfile *HardwareProfile `json:"hardwareProfile,omitempty"` + // StorageProfile - Specifies the storage settings for the virtual machine disks. + StorageProfile *StorageProfile `json:"storageProfile,omitempty"` + // OsProfile - Specifies the operating system settings for the virtual machine. + OsProfile *OSProfile `json:"osProfile,omitempty"` + // NetworkProfile - Specifies the network interfaces of the virtual machine. + NetworkProfile *NetworkProfile `json:"networkProfile,omitempty"` + // DiagnosticsProfile - Specifies the boot diagnostic settings state.
    Minimum api-version: 2015-06-15. + DiagnosticsProfile *DiagnosticsProfile `json:"diagnosticsProfile,omitempty"` + // AvailabilitySet - Specifies information about the availability set that the virtual machine should be assigned to. Virtual machines specified in the same availability set are allocated to different nodes to maximize availability. For more information about availability sets, see [Manage the availability of virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-manage-availability?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json).
    For more information on Azure planned maintenance, see [Planned maintenance for virtual machines in Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-planned-maintenance?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json)
    Currently, a VM can only be added to an availability set at creation time. An existing VM cannot be added to an availability set. + AvailabilitySet *SubResource `json:"availabilitySet,omitempty"` + // ProvisioningState - The provisioning state, which only appears in the response. + ProvisioningState *string `json:"provisioningState,omitempty"` + // InstanceView - The virtual machine instance view. + InstanceView *VirtualMachineInstanceView `json:"instanceView,omitempty"` + // LicenseType - Specifies that the image or disk that is being used was licensed on-premises. This element is only used for images that contain the Windows Server operating system.
    Possible values are:
    Windows_Client
    Windows_Server
    If this element is included in a request for an update, the value must match the initial value. This value cannot be updated.
    For more information, see [Azure Hybrid Use Benefit for Windows Server](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-hybrid-use-benefit-licensing?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json)
    Minimum api-version: 2015-06-15 + LicenseType *string `json:"licenseType,omitempty"` + // VMID - Specifies the VM unique ID which is a 128-bits identifier that is encoded and stored in all Azure IaaS VMs SMBIOS and can be read using platform BIOS commands. + VMID *string `json:"vmId,omitempty"` +} + +// VirtualMachineScaleSet describes a Virtual Machine Scale Set. +type VirtualMachineScaleSet struct { + autorest.Response `json:"-"` + // ID - Resource Id + ID *string `json:"id,omitempty"` + // Name - Resource name + Name *string `json:"name,omitempty"` + // Type - Resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` + // Tags - Resource tags + Tags *map[string]*string `json:"tags,omitempty"` + // Sku - The virtual machine scale set sku. + Sku *Sku `json:"sku,omitempty"` + // Plan - Specifies information about the marketplace image used to create the virtual machine. This element is only used for marketplace images. Before you can use a marketplace image from an API, you must enable the image for programmatic use. In the Azure portal, find the marketplace image that you want to use and then click **Want to deploy programmatically, Get Started ->**. Enter any required information and then click **Save**. + Plan *Plan `json:"plan,omitempty"` + *VirtualMachineScaleSetProperties `json:"properties,omitempty"` + // Identity - The identity of the virtual machine scale set, if configured. + Identity *VirtualMachineScaleSetIdentity `json:"identity,omitempty"` + // Zones - The virtual machine scale set zones. + Zones *[]string `json:"zones,omitempty"` +} + +// VirtualMachineScaleSetDataDisk describes a virtual machine scale set data disk. +type VirtualMachineScaleSetDataDisk struct { + // Name - The disk name. + Name *string `json:"name,omitempty"` + // Lun - Specifies the logical unit number of the data disk. This value is used to identify data disks within the VM and therefore must be unique for each data disk attached to a VM. + Lun *int32 `json:"lun,omitempty"` + // Caching - Specifies the caching requirements.
    Possible values are:
    **None**
    **ReadOnly**
    **ReadWrite**
    Default: **None for Standard storage. ReadOnly for Premium storage**. Possible values include: 'CachingTypesNone', 'CachingTypesReadOnly', 'CachingTypesReadWrite' + Caching CachingTypes `json:"caching,omitempty"` + // WriteAcceleratorEnabled - Specifies whether writeAccelerator should be enabled or disabled on the disk. + WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty"` + // CreateOption - The create option. Possible values include: 'DiskCreateOptionTypesFromImage', 'DiskCreateOptionTypesEmpty', 'DiskCreateOptionTypesAttach' + CreateOption DiskCreateOptionTypes `json:"createOption,omitempty"` + // DiskSizeGB - Specifies the size of an empty data disk in gigabytes. This element can be used to overwrite the name of the disk in a virtual machine image.
    This value cannot be larger than 1023 GB + DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` + // ManagedDisk - The managed disk parameters. + ManagedDisk *VirtualMachineScaleSetManagedDiskParameters `json:"managedDisk,omitempty"` +} + +// VirtualMachineScaleSetExtension describes a Virtual Machine Scale Set Extension. +type VirtualMachineScaleSetExtension struct { + autorest.Response `json:"-"` + // ID - Resource Id + ID *string `json:"id,omitempty"` + // Name - The name of the extension. + Name *string `json:"name,omitempty"` + *VirtualMachineScaleSetExtensionProperties `json:"properties,omitempty"` +} + +// VirtualMachineScaleSetExtensionListResult the List VM scale set extension operation response. +type VirtualMachineScaleSetExtensionListResult struct { + autorest.Response `json:"-"` + // Value - The list of VM scale set extensions. + Value *[]VirtualMachineScaleSetExtension `json:"value,omitempty"` + // NextLink - The uri to fetch the next page of VM scale set extensions. Call ListNext() with this to fetch the next page of VM scale set extensions. + NextLink *string `json:"nextLink,omitempty"` +} + +// VirtualMachineScaleSetExtensionListResultIterator provides access to a complete listing of +// VirtualMachineScaleSetExtension values. +type VirtualMachineScaleSetExtensionListResultIterator struct { + i int + page VirtualMachineScaleSetExtensionListResultPage +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *VirtualMachineScaleSetExtensionListResultIterator) Next() error { + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err := iter.page.Next() + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter VirtualMachineScaleSetExtensionListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter VirtualMachineScaleSetExtensionListResultIterator) Response() VirtualMachineScaleSetExtensionListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter VirtualMachineScaleSetExtensionListResultIterator) Value() VirtualMachineScaleSetExtension { + if !iter.page.NotDone() { + return VirtualMachineScaleSetExtension{} + } + return iter.page.Values()[iter.i] +} + +// IsEmpty returns true if the ListResult contains no values. +func (vmsselr VirtualMachineScaleSetExtensionListResult) IsEmpty() bool { + return vmsselr.Value == nil || len(*vmsselr.Value) == 0 +} + +// virtualMachineScaleSetExtensionListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (vmsselr VirtualMachineScaleSetExtensionListResult) virtualMachineScaleSetExtensionListResultPreparer() (*http.Request, error) { + if vmsselr.NextLink == nil || len(to.String(vmsselr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(vmsselr.NextLink))) +} + +// VirtualMachineScaleSetExtensionListResultPage contains a page of VirtualMachineScaleSetExtension values. 
+type VirtualMachineScaleSetExtensionListResultPage struct { + fn func(VirtualMachineScaleSetExtensionListResult) (VirtualMachineScaleSetExtensionListResult, error) + vmsselr VirtualMachineScaleSetExtensionListResult +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *VirtualMachineScaleSetExtensionListResultPage) Next() error { + next, err := page.fn(page.vmsselr) + if err != nil { + return err + } + page.vmsselr = next + return nil +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page VirtualMachineScaleSetExtensionListResultPage) NotDone() bool { + return !page.vmsselr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page VirtualMachineScaleSetExtensionListResultPage) Response() VirtualMachineScaleSetExtensionListResult { + return page.vmsselr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page VirtualMachineScaleSetExtensionListResultPage) Values() []VirtualMachineScaleSetExtension { + if page.vmsselr.IsEmpty() { + return nil + } + return *page.vmsselr.Value +} + +// VirtualMachineScaleSetExtensionProfile describes a virtual machine scale set extension profile. +type VirtualMachineScaleSetExtensionProfile struct { + // Extensions - The virtual machine scale set child extension resources. + Extensions *[]VirtualMachineScaleSetExtension `json:"extensions,omitempty"` +} + +// VirtualMachineScaleSetExtensionProperties describes the properties of a Virtual Machine Scale Set Extension. +type VirtualMachineScaleSetExtensionProperties struct { + // ForceUpdateTag - If a value is provided and is different from the previous value, the extension handler will be forced to update even if the extension configuration has not changed. + ForceUpdateTag *string `json:"forceUpdateTag,omitempty"` + // Publisher - The name of the extension handler publisher. + Publisher *string `json:"publisher,omitempty"` + // Type - Specifies the type of the extension; an example is "CustomScriptExtension". + Type *string `json:"type,omitempty"` + // TypeHandlerVersion - Specifies the version of the script handler. + TypeHandlerVersion *string `json:"typeHandlerVersion,omitempty"` + // AutoUpgradeMinorVersion - Indicates whether the extension should use a newer minor version if one is available at deployment time. Once deployed, however, the extension will not upgrade minor versions unless redeployed, even with this property set to true. + AutoUpgradeMinorVersion *bool `json:"autoUpgradeMinorVersion,omitempty"` + // Settings - Json formatted public settings for the extension. + Settings *map[string]interface{} `json:"settings,omitempty"` + // ProtectedSettings - The extension can contain either protectedSettings or protectedSettingsFromKeyVault or no protected settings at all. + ProtectedSettings *map[string]interface{} `json:"protectedSettings,omitempty"` + // ProvisioningState - The provisioning state, which only appears in the response. + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// VirtualMachineScaleSetExtensionsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type VirtualMachineScaleSetExtensionsCreateOrUpdateFuture struct { + azure.Future + req *http.Request +} + +// Result returns the result of the asynchronous operation. 
+// If the operation has not completed it will return an error. +func (future VirtualMachineScaleSetExtensionsCreateOrUpdateFuture) Result(client VirtualMachineScaleSetExtensionsClient) (vmsse VirtualMachineScaleSetExtension, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + return + } + if !done { + return vmsse, autorest.NewError("compute.VirtualMachineScaleSetExtensionsCreateOrUpdateFuture", "Result", "asynchronous operation has not completed") + } + if future.PollingMethod() == azure.PollingLocation { + vmsse, err = client.CreateOrUpdateResponder(future.Response()) + return + } + var resp *http.Response + resp, err = autorest.SendWithSender(client, autorest.ChangeToGet(future.req), + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if err != nil { + return + } + vmsse, err = client.CreateOrUpdateResponder(resp) + return +} + +// VirtualMachineScaleSetExtensionsDeleteFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type VirtualMachineScaleSetExtensionsDeleteFuture struct { + azure.Future + req *http.Request +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future VirtualMachineScaleSetExtensionsDeleteFuture) Result(client VirtualMachineScaleSetExtensionsClient) (osr OperationStatusResponse, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + return + } + if !done { + return osr, autorest.NewError("compute.VirtualMachineScaleSetExtensionsDeleteFuture", "Result", "asynchronous operation has not completed") + } + if future.PollingMethod() == azure.PollingLocation { + osr, err = client.DeleteResponder(future.Response()) + return + } + var resp *http.Response + resp, err = autorest.SendWithSender(client, autorest.ChangeToGet(future.req), + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if err != nil { + return + } + osr, err = client.DeleteResponder(resp) + return +} + +// VirtualMachineScaleSetIdentity identity for the virtual machine scale set. +type VirtualMachineScaleSetIdentity struct { + // PrincipalID - The principal id of virtual machine scale set identity. This property will only be provided for a system assigned identity. + PrincipalID *string `json:"principalId,omitempty"` + // TenantID - The tenant id associated with the virtual machine scale set. This property will only be provided for a system assigned identity. + TenantID *string `json:"tenantId,omitempty"` + // Type - The type of identity used for the virtual machine scale set. The type 'SystemAssigned, UserAssigned' includes both an implicitly created identity and a set of user assigned identities. The type 'None' will remove any identities from the virtual machine scale set. Possible values include: 'ResourceIdentityTypeSystemAssigned', 'ResourceIdentityTypeUserAssigned', 'ResourceIdentityTypeSystemAssignedUserAssigned', 'ResourceIdentityTypeNone' + Type ResourceIdentityType `json:"type,omitempty"` + // IdentityIds - The list of user identities associated with the virtual machine scale set. The user identity references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/identities/{identityName}'. 
+ IdentityIds *[]string `json:"identityIds,omitempty"` +} + +// VirtualMachineScaleSetInstanceView the instance view of a virtual machine scale set. +type VirtualMachineScaleSetInstanceView struct { + autorest.Response `json:"-"` + // VirtualMachine - The instance view status summary for the virtual machine scale set. + VirtualMachine *VirtualMachineScaleSetInstanceViewStatusesSummary `json:"virtualMachine,omitempty"` + // Extensions - The extensions information. + Extensions *[]VirtualMachineScaleSetVMExtensionsSummary `json:"extensions,omitempty"` + // Statuses - The resource status information. + Statuses *[]InstanceViewStatus `json:"statuses,omitempty"` +} + +// VirtualMachineScaleSetInstanceViewStatusesSummary instance view statuses summary for virtual machines of a virtual +// machine scale set. +type VirtualMachineScaleSetInstanceViewStatusesSummary struct { + // StatusesSummary - The extensions information. + StatusesSummary *[]VirtualMachineStatusCodeCount `json:"statusesSummary,omitempty"` +} + +// VirtualMachineScaleSetIPConfiguration describes a virtual machine scale set network profile's IP configuration. +type VirtualMachineScaleSetIPConfiguration struct { + // ID - Resource Id + ID *string `json:"id,omitempty"` + // Name - The IP configuration name. + Name *string `json:"name,omitempty"` + *VirtualMachineScaleSetIPConfigurationProperties `json:"properties,omitempty"` +} + +// VirtualMachineScaleSetIPConfigurationProperties describes a virtual machine scale set network profile's IP +// configuration properties. +type VirtualMachineScaleSetIPConfigurationProperties struct { + // Subnet - Specifies the identifier of the subnet. + Subnet *APIEntityReference `json:"subnet,omitempty"` + // Primary - Specifies the primary network interface in case the virtual machine has more than 1 network interface. + Primary *bool `json:"primary,omitempty"` + // PublicIPAddressConfiguration - The publicIPAddressConfiguration. + PublicIPAddressConfiguration *VirtualMachineScaleSetPublicIPAddressConfiguration `json:"publicIPAddressConfiguration,omitempty"` + // PrivateIPAddressVersion - Available from Api-Version 2017-03-30 onwards, it represents whether the specific ipconfiguration is IPv4 or IPv6. Default is taken as IPv4. Possible values are: 'IPv4' and 'IPv6'. Possible values include: 'IPv4', 'IPv6' + PrivateIPAddressVersion IPVersion `json:"privateIPAddressVersion,omitempty"` + // ApplicationGatewayBackendAddressPools - Specifies an array of references to backend address pools of application gateways. A scale set can reference backend address pools of multiple application gateways. Multiple scale sets cannot use the same application gateway. + ApplicationGatewayBackendAddressPools *[]SubResource `json:"applicationGatewayBackendAddressPools,omitempty"` + // LoadBalancerBackendAddressPools - Specifies an array of references to backend address pools of load balancers. A scale set can reference backend address pools of one public and one internal load balancer. Multiple scale sets cannot use the same load balancer. + LoadBalancerBackendAddressPools *[]SubResource `json:"loadBalancerBackendAddressPools,omitempty"` + // LoadBalancerInboundNatPools - Specifies an array of references to inbound Nat pools of the load balancers. A scale set can reference inbound nat pools of one public and one internal load balancer. 
Multiple scale sets cannot use the same load balancer + LoadBalancerInboundNatPools *[]SubResource `json:"loadBalancerInboundNatPools,omitempty"` +} + +// VirtualMachineScaleSetListResult the List Virtual Machine operation response. +type VirtualMachineScaleSetListResult struct { + autorest.Response `json:"-"` + // Value - The list of virtual machine scale sets. + Value *[]VirtualMachineScaleSet `json:"value,omitempty"` + // NextLink - The uri to fetch the next page of Virtual Machine Scale Sets. Call ListNext() with this to fetch the next page of VMSS. + NextLink *string `json:"nextLink,omitempty"` +} + +// VirtualMachineScaleSetListResultIterator provides access to a complete listing of VirtualMachineScaleSet values. +type VirtualMachineScaleSetListResultIterator struct { + i int + page VirtualMachineScaleSetListResultPage +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *VirtualMachineScaleSetListResultIterator) Next() error { + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err := iter.page.Next() + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter VirtualMachineScaleSetListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter VirtualMachineScaleSetListResultIterator) Response() VirtualMachineScaleSetListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter VirtualMachineScaleSetListResultIterator) Value() VirtualMachineScaleSet { + if !iter.page.NotDone() { + return VirtualMachineScaleSet{} + } + return iter.page.Values()[iter.i] +} + +// IsEmpty returns true if the ListResult contains no values. +func (vmsslr VirtualMachineScaleSetListResult) IsEmpty() bool { + return vmsslr.Value == nil || len(*vmsslr.Value) == 0 +} + +// virtualMachineScaleSetListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (vmsslr VirtualMachineScaleSetListResult) virtualMachineScaleSetListResultPreparer() (*http.Request, error) { + if vmsslr.NextLink == nil || len(to.String(vmsslr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(vmsslr.NextLink))) +} + +// VirtualMachineScaleSetListResultPage contains a page of VirtualMachineScaleSet values. +type VirtualMachineScaleSetListResultPage struct { + fn func(VirtualMachineScaleSetListResult) (VirtualMachineScaleSetListResult, error) + vmsslr VirtualMachineScaleSetListResult +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *VirtualMachineScaleSetListResultPage) Next() error { + next, err := page.fn(page.vmsslr) + if err != nil { + return err + } + page.vmsslr = next + return nil +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page VirtualMachineScaleSetListResultPage) NotDone() bool { + return !page.vmsslr.IsEmpty() +} + +// Response returns the raw server response from the last page request. 
+func (page VirtualMachineScaleSetListResultPage) Response() VirtualMachineScaleSetListResult { + return page.vmsslr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page VirtualMachineScaleSetListResultPage) Values() []VirtualMachineScaleSet { + if page.vmsslr.IsEmpty() { + return nil + } + return *page.vmsslr.Value +} + +// VirtualMachineScaleSetListSkusResult the Virtual Machine Scale Set List Skus operation response. +type VirtualMachineScaleSetListSkusResult struct { + autorest.Response `json:"-"` + // Value - The list of skus available for the virtual machine scale set. + Value *[]VirtualMachineScaleSetSku `json:"value,omitempty"` + // NextLink - The uri to fetch the next page of Virtual Machine Scale Set Skus. Call ListNext() with this to fetch the next page of VMSS Skus. + NextLink *string `json:"nextLink,omitempty"` +} + +// VirtualMachineScaleSetListSkusResultIterator provides access to a complete listing of VirtualMachineScaleSetSku +// values. +type VirtualMachineScaleSetListSkusResultIterator struct { + i int + page VirtualMachineScaleSetListSkusResultPage +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *VirtualMachineScaleSetListSkusResultIterator) Next() error { + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err := iter.page.Next() + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter VirtualMachineScaleSetListSkusResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter VirtualMachineScaleSetListSkusResultIterator) Response() VirtualMachineScaleSetListSkusResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter VirtualMachineScaleSetListSkusResultIterator) Value() VirtualMachineScaleSetSku { + if !iter.page.NotDone() { + return VirtualMachineScaleSetSku{} + } + return iter.page.Values()[iter.i] +} + +// IsEmpty returns true if the ListResult contains no values. +func (vmsslsr VirtualMachineScaleSetListSkusResult) IsEmpty() bool { + return vmsslsr.Value == nil || len(*vmsslsr.Value) == 0 +} + +// virtualMachineScaleSetListSkusResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (vmsslsr VirtualMachineScaleSetListSkusResult) virtualMachineScaleSetListSkusResultPreparer() (*http.Request, error) { + if vmsslsr.NextLink == nil || len(to.String(vmsslsr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(vmsslsr.NextLink))) +} + +// VirtualMachineScaleSetListSkusResultPage contains a page of VirtualMachineScaleSetSku values. +type VirtualMachineScaleSetListSkusResultPage struct { + fn func(VirtualMachineScaleSetListSkusResult) (VirtualMachineScaleSetListSkusResult, error) + vmsslsr VirtualMachineScaleSetListSkusResult +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. 
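
// Illustrative usage (an editor's sketch, not part of the vendored file): the
// iterator/page pair above is the SDK's paging pattern; only the
// NotDone/Value/Next methods defined in this file are assumed, plus a
// VirtualMachineScaleSetListResultIterator already primed with a first page
// (for example by a hypothetical List-style call on the client):
//
//	func printScaleSetNames(iter compute.VirtualMachineScaleSetListResultIterator) error {
//		for iter.NotDone() {
//			vmss := iter.Value()
//			if vmss.Name != nil {
//				fmt.Println(*vmss.Name) // requires "fmt"
//			}
//			// Next fetches the following page transparently once the
//			// current page is exhausted.
//			if err := iter.Next(); err != nil {
//				return err
//			}
//		}
//		return nil
//	}
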
+func (page *VirtualMachineScaleSetListSkusResultPage) Next() error { + next, err := page.fn(page.vmsslsr) + if err != nil { + return err + } + page.vmsslsr = next + return nil +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page VirtualMachineScaleSetListSkusResultPage) NotDone() bool { + return !page.vmsslsr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page VirtualMachineScaleSetListSkusResultPage) Response() VirtualMachineScaleSetListSkusResult { + return page.vmsslsr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page VirtualMachineScaleSetListSkusResultPage) Values() []VirtualMachineScaleSetSku { + if page.vmsslsr.IsEmpty() { + return nil + } + return *page.vmsslsr.Value +} + +// VirtualMachineScaleSetListWithLinkResult the List Virtual Machine operation response. +type VirtualMachineScaleSetListWithLinkResult struct { + autorest.Response `json:"-"` + // Value - The list of virtual machine scale sets. + Value *[]VirtualMachineScaleSet `json:"value,omitempty"` + // NextLink - The uri to fetch the next page of Virtual Machine Scale Sets. Call ListNext() with this to fetch the next page of Virtual Machine Scale Sets. + NextLink *string `json:"nextLink,omitempty"` +} + +// VirtualMachineScaleSetListWithLinkResultIterator provides access to a complete listing of VirtualMachineScaleSet +// values. +type VirtualMachineScaleSetListWithLinkResultIterator struct { + i int + page VirtualMachineScaleSetListWithLinkResultPage +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *VirtualMachineScaleSetListWithLinkResultIterator) Next() error { + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err := iter.page.Next() + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter VirtualMachineScaleSetListWithLinkResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter VirtualMachineScaleSetListWithLinkResultIterator) Response() VirtualMachineScaleSetListWithLinkResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter VirtualMachineScaleSetListWithLinkResultIterator) Value() VirtualMachineScaleSet { + if !iter.page.NotDone() { + return VirtualMachineScaleSet{} + } + return iter.page.Values()[iter.i] +} + +// IsEmpty returns true if the ListResult contains no values. +func (vmsslwlr VirtualMachineScaleSetListWithLinkResult) IsEmpty() bool { + return vmsslwlr.Value == nil || len(*vmsslwlr.Value) == 0 +} + +// virtualMachineScaleSetListWithLinkResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. 
+func (vmsslwlr VirtualMachineScaleSetListWithLinkResult) virtualMachineScaleSetListWithLinkResultPreparer() (*http.Request, error) { + if vmsslwlr.NextLink == nil || len(to.String(vmsslwlr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(vmsslwlr.NextLink))) +} + +// VirtualMachineScaleSetListWithLinkResultPage contains a page of VirtualMachineScaleSet values. +type VirtualMachineScaleSetListWithLinkResultPage struct { + fn func(VirtualMachineScaleSetListWithLinkResult) (VirtualMachineScaleSetListWithLinkResult, error) + vmsslwlr VirtualMachineScaleSetListWithLinkResult +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *VirtualMachineScaleSetListWithLinkResultPage) Next() error { + next, err := page.fn(page.vmsslwlr) + if err != nil { + return err + } + page.vmsslwlr = next + return nil +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page VirtualMachineScaleSetListWithLinkResultPage) NotDone() bool { + return !page.vmsslwlr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page VirtualMachineScaleSetListWithLinkResultPage) Response() VirtualMachineScaleSetListWithLinkResult { + return page.vmsslwlr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page VirtualMachineScaleSetListWithLinkResultPage) Values() []VirtualMachineScaleSet { + if page.vmsslwlr.IsEmpty() { + return nil + } + return *page.vmsslwlr.Value +} + +// VirtualMachineScaleSetManagedDiskParameters describes the parameters of a ScaleSet managed disk. +type VirtualMachineScaleSetManagedDiskParameters struct { + // StorageAccountType - Specifies the storage account type for the managed disk. Possible values are: Standard_LRS or Premium_LRS. Possible values include: 'StandardLRS', 'PremiumLRS' + StorageAccountType StorageAccountTypes `json:"storageAccountType,omitempty"` +} + +// VirtualMachineScaleSetNetworkConfiguration describes a virtual machine scale set network profile's network +// configurations. +type VirtualMachineScaleSetNetworkConfiguration struct { + // ID - Resource Id + ID *string `json:"id,omitempty"` + // Name - The network configuration name. + Name *string `json:"name,omitempty"` + *VirtualMachineScaleSetNetworkConfigurationProperties `json:"properties,omitempty"` +} + +// VirtualMachineScaleSetNetworkConfigurationDNSSettings describes a virtual machines scale sets network +// configuration's DNS settings. +type VirtualMachineScaleSetNetworkConfigurationDNSSettings struct { + // DNSServers - List of DNS servers IP addresses + DNSServers *[]string `json:"dnsServers,omitempty"` +} + +// VirtualMachineScaleSetNetworkConfigurationProperties describes a virtual machine scale set network profile's IP +// configuration. +type VirtualMachineScaleSetNetworkConfigurationProperties struct { + // Primary - Specifies the primary network interface in case the virtual machine has more than 1 network interface. + Primary *bool `json:"primary,omitempty"` + // EnableAcceleratedNetworking - Specifies whether the network interface is accelerated networking-enabled. + EnableAcceleratedNetworking *bool `json:"enableAcceleratedNetworking,omitempty"` + // NetworkSecurityGroup - The network security group. 
+ NetworkSecurityGroup *SubResource `json:"networkSecurityGroup,omitempty"` + // DNSSettings - The dns settings to be applied on the network interfaces. + DNSSettings *VirtualMachineScaleSetNetworkConfigurationDNSSettings `json:"dnsSettings,omitempty"` + // IPConfigurations - Specifies the IP configurations of the network interface. + IPConfigurations *[]VirtualMachineScaleSetIPConfiguration `json:"ipConfigurations,omitempty"` + // EnableIPForwarding - Whether IP forwarding enabled on this NIC. + EnableIPForwarding *bool `json:"enableIPForwarding,omitempty"` +} + +// VirtualMachineScaleSetNetworkProfile describes a virtual machine scale set network profile. +type VirtualMachineScaleSetNetworkProfile struct { + // HealthProbe - A reference to a load balancer probe used to determine the health of an instance in the virtual machine scale set. The reference will be in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/probes/{probeName}'. + HealthProbe *APIEntityReference `json:"healthProbe,omitempty"` + // NetworkInterfaceConfigurations - The list of network configurations. + NetworkInterfaceConfigurations *[]VirtualMachineScaleSetNetworkConfiguration `json:"networkInterfaceConfigurations,omitempty"` +} + +// VirtualMachineScaleSetOSDisk describes a virtual machine scale set operating system disk. +type VirtualMachineScaleSetOSDisk struct { + // Name - The disk name. + Name *string `json:"name,omitempty"` + // Caching - Specifies the caching requirements.
Possible values are: **None**, **ReadOnly**, **ReadWrite**. Default: **None for Standard storage. ReadOnly for Premium storage**. Possible values include: 'CachingTypesNone', 'CachingTypesReadOnly', 'CachingTypesReadWrite' + Caching CachingTypes `json:"caching,omitempty"` + // WriteAcceleratorEnabled - Specifies whether writeAccelerator should be enabled or disabled on the disk. + WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty"` + // CreateOption - Specifies how the virtual machines in the scale set should be created. The only allowed value is: **FromImage** – This value is used when you are using an image to create the virtual machine. If you are using a platform image, you also use the imageReference element described above. If you are using a marketplace image, you also use the plan element previously described. Possible values include: 'DiskCreateOptionTypesFromImage', 'DiskCreateOptionTypesEmpty', 'DiskCreateOptionTypesAttach' + CreateOption DiskCreateOptionTypes `json:"createOption,omitempty"` + // OsType - This property allows you to specify the type of the OS that is included in the disk if creating a VM from user-image or a specialized VHD. Possible values are: **Windows**, **Linux**. Possible values include: 'Windows', 'Linux' + OsType OperatingSystemTypes `json:"osType,omitempty"` + // Image - Specifies information about the unmanaged user image to base the scale set on. + Image *VirtualHardDisk `json:"image,omitempty"` + // VhdContainers - Specifies the container urls that are used to store operating system disks for the scale set. + VhdContainers *[]string `json:"vhdContainers,omitempty"` + // ManagedDisk - The managed disk parameters. + ManagedDisk *VirtualMachineScaleSetManagedDiskParameters `json:"managedDisk,omitempty"` +} + +// VirtualMachineScaleSetOSProfile describes a virtual machine scale set OS profile. +type VirtualMachineScaleSetOSProfile struct { + // ComputerNamePrefix - Specifies the computer name prefix for all of the virtual machines in the scale set. Computer name prefixes must be 1 to 15 characters long. + ComputerNamePrefix *string `json:"computerNamePrefix,omitempty"` + // AdminUsername - Specifies the name of the administrator account.
**Windows-only restriction:** Cannot end in ".". **Disallowed values:** "administrator", "admin", "user", "user1", "test", "user2", "test1", "user3", "admin1", "1", "123", "a", "actuser", "adm", "admin2", "aspnet", "backup", "console", "david", "guest", "john", "owner", "root", "server", "sql", "support", "support_388945a0", "sys", "test2", "test3", "user4", "user5". **Minimum-length (Linux):** 1 character. **Max-length (Linux):** 64 characters. **Max-length (Windows):** 20 characters. For root access to the Linux VM, see [Using root privileges on Linux virtual machines in Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-use-root-privileges?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json). For a list of built-in system users on Linux that should not be used in this field, see [Selecting User Names for Linux on Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-usernames?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json) + AdminUsername *string `json:"adminUsername,omitempty"` + // AdminPassword - Specifies the password of the administrator account.
**Minimum-length (Windows):** 8 characters. **Minimum-length (Linux):** 6 characters. **Max-length (Windows):** 123 characters. **Max-length (Linux):** 72 characters. **Complexity requirements:** 3 out of 4 conditions below need to be fulfilled: Has lower characters; Has upper characters; Has a digit; Has a special character (Regex match [\W_]). **Disallowed values:** "abc@123", "P@$$w0rd", "P@ssw0rd", "P@ssword123", "Pa$$word", "pass@word1", "Password!", "Password1", "Password22", "iloveyou!". For resetting the password, see [How to reset the Remote Desktop service or its login password in a Windows VM](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-reset-rdp?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json). For resetting root password, see [Manage users, SSH, and check or repair disks on Azure Linux VMs using the VMAccess Extension](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-using-vmaccess-extension?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json#reset-root-password) + AdminPassword *string `json:"adminPassword,omitempty"` + // CustomData - Specifies a base-64 encoded string of custom data. The base-64 encoded string is decoded to a binary array that is saved as a file on the Virtual Machine. The maximum length of the binary array is 65535 bytes. For using cloud-init for your VM, see [Using cloud-init to customize a Linux VM during creation](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-using-cloud-init?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json) + CustomData *string `json:"customData,omitempty"` + // WindowsConfiguration - Specifies Windows operating system settings on the virtual machine. + WindowsConfiguration *WindowsConfiguration `json:"windowsConfiguration,omitempty"` + // LinuxConfiguration - Specifies the Linux operating system settings on the virtual machine. For a list of supported Linux distributions, see [Linux on Azure-Endorsed Distributions](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-endorsed-distros?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json). For running non-endorsed distributions, see [Information for Non-Endorsed Distributions](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-linux-create-upload-generic?toc=%2fazure%2fvirtual-machines%2flinux%2ftoc.json). + LinuxConfiguration *LinuxConfiguration `json:"linuxConfiguration,omitempty"` + // Secrets - Specifies set of certificates that should be installed onto the virtual machines in the scale set. + Secrets *[]VaultSecretGroup `json:"secrets,omitempty"` +} + +// VirtualMachineScaleSetProperties describes the properties of a Virtual Machine Scale Set. +type VirtualMachineScaleSetProperties struct { + // UpgradePolicy - The upgrade policy. + UpgradePolicy *UpgradePolicy `json:"upgradePolicy,omitempty"` + // VirtualMachineProfile - The virtual machine profile. + VirtualMachineProfile *VirtualMachineScaleSetVMProfile `json:"virtualMachineProfile,omitempty"` + // ProvisioningState - The provisioning state, which only appears in the response. + ProvisioningState *string `json:"provisioningState,omitempty"` + // Overprovision - Specifies whether the Virtual Machine Scale Set should be overprovisioned. + Overprovision *bool `json:"overprovision,omitempty"` + // UniqueID - Specifies the ID which uniquely identifies a Virtual Machine Scale Set. + UniqueID *string `json:"uniqueId,omitempty"` + // SinglePlacementGroup - When true this limits the scale set to a single placement group, of max size 100 virtual machines. + SinglePlacementGroup *bool `json:"singlePlacementGroup,omitempty"` +} + +// VirtualMachineScaleSetPublicIPAddressConfiguration describes a virtual machines scale set IP Configuration's +// PublicIPAddress configuration +type VirtualMachineScaleSetPublicIPAddressConfiguration struct { + // Name - The publicIP address configuration name. + Name *string `json:"name,omitempty"` + *VirtualMachineScaleSetPublicIPAddressConfigurationProperties `json:"properties,omitempty"` +} + +// VirtualMachineScaleSetPublicIPAddressConfigurationDNSSettings describes a virtual machines scale sets network +// configuration's DNS settings. +type VirtualMachineScaleSetPublicIPAddressConfigurationDNSSettings struct { + // DomainNameLabel - The Domain name label.The concatenation of the domain name label and vm index will be the domain name labels of the PublicIPAddress resources that will be created + DomainNameLabel *string `json:"domainNameLabel,omitempty"` +} + +// VirtualMachineScaleSetPublicIPAddressConfigurationProperties describes a virtual machines scale set IP +// Configuration's PublicIPAddress configuration +type VirtualMachineScaleSetPublicIPAddressConfigurationProperties struct { + // IdleTimeoutInMinutes - The idle timeout of the public IP address. + IdleTimeoutInMinutes *int32 `json:"idleTimeoutInMinutes,omitempty"` + // DNSSettings - The dns settings to be applied on the publicIP addresses . + DNSSettings *VirtualMachineScaleSetPublicIPAddressConfigurationDNSSettings `json:"dnsSettings,omitempty"` +} + +// VirtualMachineScaleSetRollingUpgradesCancelFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type VirtualMachineScaleSetRollingUpgradesCancelFuture struct { + azure.Future + req *http.Request +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error.
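
// Illustrative sketch (an editor's addition, not part of the vendored file):
// the AdminPassword constraints documented above are mechanical enough to
// pre-validate client-side. Assuming the Linux length bounds and treating
// "special" as the documented [\W_] class:
//
//	var passwordClasses = []*regexp.Regexp{ // requires "regexp"
//		regexp.MustCompile(`[a-z]`), // lower
//		regexp.MustCompile(`[A-Z]`), // upper
//		regexp.MustCompile(`[0-9]`), // digit
//		regexp.MustCompile(`[\W_]`), // special
//	}
//
//	func validLinuxAdminPassword(pw string) bool {
//		if len(pw) < 6 || len(pw) > 72 { // Windows would use 8 and 123
//			return false
//		}
//		matched := 0
//		for _, re := range passwordClasses {
//			if re.MatchString(pw) {
//				matched++
//			}
//		}
//		return matched >= 3 // 3 of the 4 character classes must be present
//	}
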
+func (future VirtualMachineScaleSetRollingUpgradesCancelFuture) Result(client VirtualMachineScaleSetRollingUpgradesClient) (osr OperationStatusResponse, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + return + } + if !done { + return osr, autorest.NewError("compute.VirtualMachineScaleSetRollingUpgradesCancelFuture", "Result", "asynchronous operation has not completed") + } + if future.PollingMethod() == azure.PollingLocation { + osr, err = client.CancelResponder(future.Response()) + return + } + var resp *http.Response + resp, err = autorest.SendWithSender(client, autorest.ChangeToGet(future.req), + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if err != nil { + return + } + osr, err = client.CancelResponder(resp) + return +} + +// VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture an abstraction for monitoring and retrieving the results +// of a long-running operation. +type VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture struct { + azure.Future + req *http.Request +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture) Result(client VirtualMachineScaleSetRollingUpgradesClient) (osr OperationStatusResponse, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + return + } + if !done { + return osr, autorest.NewError("compute.VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture", "Result", "asynchronous operation has not completed") + } + if future.PollingMethod() == azure.PollingLocation { + osr, err = client.StartOSUpgradeResponder(future.Response()) + return + } + var resp *http.Response + resp, err = autorest.SendWithSender(client, autorest.ChangeToGet(future.req), + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if err != nil { + return + } + osr, err = client.StartOSUpgradeResponder(resp) + return +} + +// VirtualMachineScaleSetsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type VirtualMachineScaleSetsCreateOrUpdateFuture struct { + azure.Future + req *http.Request +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future VirtualMachineScaleSetsCreateOrUpdateFuture) Result(client VirtualMachineScaleSetsClient) (vmss VirtualMachineScaleSet, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + return + } + if !done { + return vmss, autorest.NewError("compute.VirtualMachineScaleSetsCreateOrUpdateFuture", "Result", "asynchronous operation has not completed") + } + if future.PollingMethod() == azure.PollingLocation { + vmss, err = client.CreateOrUpdateResponder(future.Response()) + return + } + var resp *http.Response + resp, err = autorest.SendWithSender(client, autorest.ChangeToGet(future.req), + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if err != nil { + return + } + vmss, err = client.CreateOrUpdateResponder(resp) + return +} + +// VirtualMachineScaleSetsDeallocateFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. 
+type VirtualMachineScaleSetsDeallocateFuture struct { + azure.Future + req *http.Request +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future VirtualMachineScaleSetsDeallocateFuture) Result(client VirtualMachineScaleSetsClient) (osr OperationStatusResponse, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + return + } + if !done { + return osr, autorest.NewError("compute.VirtualMachineScaleSetsDeallocateFuture", "Result", "asynchronous operation has not completed") + } + if future.PollingMethod() == azure.PollingLocation { + osr, err = client.DeallocateResponder(future.Response()) + return + } + var resp *http.Response + resp, err = autorest.SendWithSender(client, autorest.ChangeToGet(future.req), + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if err != nil { + return + } + osr, err = client.DeallocateResponder(resp) + return +} + +// VirtualMachineScaleSetsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type VirtualMachineScaleSetsDeleteFuture struct { + azure.Future + req *http.Request +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future VirtualMachineScaleSetsDeleteFuture) Result(client VirtualMachineScaleSetsClient) (osr OperationStatusResponse, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + return + } + if !done { + return osr, autorest.NewError("compute.VirtualMachineScaleSetsDeleteFuture", "Result", "asynchronous operation has not completed") + } + if future.PollingMethod() == azure.PollingLocation { + osr, err = client.DeleteResponder(future.Response()) + return + } + var resp *http.Response + resp, err = autorest.SendWithSender(client, autorest.ChangeToGet(future.req), + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if err != nil { + return + } + osr, err = client.DeleteResponder(resp) + return +} + +// VirtualMachineScaleSetsDeleteInstancesFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type VirtualMachineScaleSetsDeleteInstancesFuture struct { + azure.Future + req *http.Request +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future VirtualMachineScaleSetsDeleteInstancesFuture) Result(client VirtualMachineScaleSetsClient) (osr OperationStatusResponse, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + return + } + if !done { + return osr, autorest.NewError("compute.VirtualMachineScaleSetsDeleteInstancesFuture", "Result", "asynchronous operation has not completed") + } + if future.PollingMethod() == azure.PollingLocation { + osr, err = client.DeleteInstancesResponder(future.Response()) + return + } + var resp *http.Response + resp, err = autorest.SendWithSender(client, autorest.ChangeToGet(future.req), + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if err != nil { + return + } + osr, err = client.DeleteInstancesResponder(resp) + return +} + +// VirtualMachineScaleSetSku describes an available virtual machine scale set sku. 
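
// Illustrative sketch (an editor's addition, not part of the vendored file):
// every *Future type in this file follows the same contract: poll Done until
// it reports true, then call Result for the typed response. Assuming a
// configured VirtualMachineScaleSetsClient and a future returned by a
// Deallocate call; a production caller would honor the service's Retry-After
// hint rather than a fixed sleep:
//
//	func waitForDeallocate(client compute.VirtualMachineScaleSetsClient, future compute.VirtualMachineScaleSetsDeallocateFuture) (compute.OperationStatusResponse, error) {
//		for {
//			done, err := future.Done(client)
//			if err != nil {
//				return compute.OperationStatusResponse{}, err
//			}
//			if done {
//				break
//			}
//			time.Sleep(10 * time.Second) // requires "time"
//		}
//		return future.Result(client) // succeeds only after completion
//	}
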
+type VirtualMachineScaleSetSku struct { + // ResourceType - The type of resource the sku applies to. + ResourceType *string `json:"resourceType,omitempty"` + // Sku - The Sku. + Sku *Sku `json:"sku,omitempty"` + // Capacity - Specifies the number of virtual machines in the scale set. + Capacity *VirtualMachineScaleSetSkuCapacity `json:"capacity,omitempty"` +} + +// VirtualMachineScaleSetSkuCapacity describes scaling information of a sku. +type VirtualMachineScaleSetSkuCapacity struct { + // Minimum - The minimum capacity. + Minimum *int64 `json:"minimum,omitempty"` + // Maximum - The maximum capacity that can be set. + Maximum *int64 `json:"maximum,omitempty"` + // DefaultCapacity - The default capacity. + DefaultCapacity *int64 `json:"defaultCapacity,omitempty"` + // ScaleType - The scale type applicable to the sku. Possible values include: 'VirtualMachineScaleSetSkuScaleTypeAutomatic', 'VirtualMachineScaleSetSkuScaleTypeNone' + ScaleType VirtualMachineScaleSetSkuScaleType `json:"scaleType,omitempty"` +} + +// VirtualMachineScaleSetsPowerOffFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type VirtualMachineScaleSetsPowerOffFuture struct { + azure.Future + req *http.Request +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future VirtualMachineScaleSetsPowerOffFuture) Result(client VirtualMachineScaleSetsClient) (osr OperationStatusResponse, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + return + } + if !done { + return osr, autorest.NewError("compute.VirtualMachineScaleSetsPowerOffFuture", "Result", "asynchronous operation has not completed") + } + if future.PollingMethod() == azure.PollingLocation { + osr, err = client.PowerOffResponder(future.Response()) + return + } + var resp *http.Response + resp, err = autorest.SendWithSender(client, autorest.ChangeToGet(future.req), + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if err != nil { + return + } + osr, err = client.PowerOffResponder(resp) + return +} + +// VirtualMachineScaleSetsReimageAllFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type VirtualMachineScaleSetsReimageAllFuture struct { + azure.Future + req *http.Request +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future VirtualMachineScaleSetsReimageAllFuture) Result(client VirtualMachineScaleSetsClient) (osr OperationStatusResponse, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + return + } + if !done { + return osr, autorest.NewError("compute.VirtualMachineScaleSetsReimageAllFuture", "Result", "asynchronous operation has not completed") + } + if future.PollingMethod() == azure.PollingLocation { + osr, err = client.ReimageAllResponder(future.Response()) + return + } + var resp *http.Response + resp, err = autorest.SendWithSender(client, autorest.ChangeToGet(future.req), + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if err != nil { + return + } + osr, err = client.ReimageAllResponder(resp) + return +} + +// VirtualMachineScaleSetsReimageFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. 
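
// Illustrative sketch (an editor's addition, not part of the vendored file):
// VirtualMachineScaleSetSkuCapacity above exposes its scaling bounds as
// pointers, so a capacity check has to nil-guard each field before
// dereferencing:
//
//	func capacityAllowed(sku compute.VirtualMachineScaleSetSku, want int64) bool {
//		c := sku.Capacity
//		if c == nil {
//			return false
//		}
//		if c.Minimum != nil && want < *c.Minimum {
//			return false
//		}
//		if c.Maximum != nil && want > *c.Maximum {
//			return false
//		}
//		return true
//	}
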
+type VirtualMachineScaleSetsReimageFuture struct { + azure.Future + req *http.Request +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future VirtualMachineScaleSetsReimageFuture) Result(client VirtualMachineScaleSetsClient) (osr OperationStatusResponse, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + return + } + if !done { + return osr, autorest.NewError("compute.VirtualMachineScaleSetsReimageFuture", "Result", "asynchronous operation has not completed") + } + if future.PollingMethod() == azure.PollingLocation { + osr, err = client.ReimageResponder(future.Response()) + return + } + var resp *http.Response + resp, err = autorest.SendWithSender(client, autorest.ChangeToGet(future.req), + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if err != nil { + return + } + osr, err = client.ReimageResponder(resp) + return +} + +// VirtualMachineScaleSetsRestartFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type VirtualMachineScaleSetsRestartFuture struct { + azure.Future + req *http.Request +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future VirtualMachineScaleSetsRestartFuture) Result(client VirtualMachineScaleSetsClient) (osr OperationStatusResponse, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + return + } + if !done { + return osr, autorest.NewError("compute.VirtualMachineScaleSetsRestartFuture", "Result", "asynchronous operation has not completed") + } + if future.PollingMethod() == azure.PollingLocation { + osr, err = client.RestartResponder(future.Response()) + return + } + var resp *http.Response + resp, err = autorest.SendWithSender(client, autorest.ChangeToGet(future.req), + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if err != nil { + return + } + osr, err = client.RestartResponder(resp) + return +} + +// VirtualMachineScaleSetsStartFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type VirtualMachineScaleSetsStartFuture struct { + azure.Future + req *http.Request +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future VirtualMachineScaleSetsStartFuture) Result(client VirtualMachineScaleSetsClient) (osr OperationStatusResponse, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + return + } + if !done { + return osr, autorest.NewError("compute.VirtualMachineScaleSetsStartFuture", "Result", "asynchronous operation has not completed") + } + if future.PollingMethod() == azure.PollingLocation { + osr, err = client.StartResponder(future.Response()) + return + } + var resp *http.Response + resp, err = autorest.SendWithSender(client, autorest.ChangeToGet(future.req), + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if err != nil { + return + } + osr, err = client.StartResponder(resp) + return +} + +// VirtualMachineScaleSetStorageProfile describes a virtual machine scale set storage profile. +type VirtualMachineScaleSetStorageProfile struct { + // ImageReference - Specifies information about the image to use. 
You can specify information about platform images, marketplace images, or virtual machine images. This element is required when you want to use a platform image, marketplace image, or virtual machine image, but is not used in other creation operations. + ImageReference *ImageReference `json:"imageReference,omitempty"` + // OsDisk - Specifies information about the operating system disk used by the virtual machines in the scale set.
For more information about disks, see [About disks and VHDs for Azure virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-about-disks-vhds?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json). + OsDisk *VirtualMachineScaleSetOSDisk `json:"osDisk,omitempty"` + // DataDisks - Specifies the parameters that are used to add data disks to the virtual machines in the scale set. For more information about disks, see [About disks and VHDs for Azure virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-about-disks-vhds?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json). + DataDisks *[]VirtualMachineScaleSetDataDisk `json:"dataDisks,omitempty"` +} + +// VirtualMachineScaleSetsUpdateFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type VirtualMachineScaleSetsUpdateFuture struct { + azure.Future + req *http.Request +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future VirtualMachineScaleSetsUpdateFuture) Result(client VirtualMachineScaleSetsClient) (vmss VirtualMachineScaleSet, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + return + } + if !done { + return vmss, autorest.NewError("compute.VirtualMachineScaleSetsUpdateFuture", "Result", "asynchronous operation has not completed") + } + if future.PollingMethod() == azure.PollingLocation { + vmss, err = client.UpdateResponder(future.Response()) + return + } + var resp *http.Response + resp, err = autorest.SendWithSender(client, autorest.ChangeToGet(future.req), + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if err != nil { + return + } + vmss, err = client.UpdateResponder(resp) + return +} + +// VirtualMachineScaleSetsUpdateInstancesFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type VirtualMachineScaleSetsUpdateInstancesFuture struct { + azure.Future + req *http.Request +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future VirtualMachineScaleSetsUpdateInstancesFuture) Result(client VirtualMachineScaleSetsClient) (osr OperationStatusResponse, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + return + } + if !done { + return osr, autorest.NewError("compute.VirtualMachineScaleSetsUpdateInstancesFuture", "Result", "asynchronous operation has not completed") + } + if future.PollingMethod() == azure.PollingLocation { + osr, err = client.UpdateInstancesResponder(future.Response()) + return + } + var resp *http.Response + resp, err = autorest.SendWithSender(client, autorest.ChangeToGet(future.req), + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if err != nil { + return + } + osr, err = client.UpdateInstancesResponder(resp) + return +} + +// VirtualMachineScaleSetUpdate describes a Virtual Machine Scale Set. +type VirtualMachineScaleSetUpdate struct { + // Tags - Resource tags + Tags *map[string]*string `json:"tags,omitempty"` + // Sku - The virtual machine scale set sku. + Sku *Sku `json:"sku,omitempty"` + // Plan - The purchase plan when deploying a virtual machine scale set from VM Marketplace images. + Plan *Plan `json:"plan,omitempty"` + *VirtualMachineScaleSetUpdateProperties `json:"properties,omitempty"` + // Identity - The identity of the virtual machine scale set, if configured. + Identity *VirtualMachineScaleSetIdentity `json:"identity,omitempty"` +} + +// VirtualMachineScaleSetUpdateIPConfiguration describes a virtual machine scale set network profile's IP +// configuration.
+type VirtualMachineScaleSetUpdateIPConfiguration struct { + // ID - Resource Id + ID *string `json:"id,omitempty"` + // Name - The IP configuration name. + Name *string `json:"name,omitempty"` + *VirtualMachineScaleSetUpdateIPConfigurationProperties `json:"properties,omitempty"` +} + +// VirtualMachineScaleSetUpdateIPConfigurationProperties describes a virtual machine scale set network profile's IP +// configuration properties. +type VirtualMachineScaleSetUpdateIPConfigurationProperties struct { + // Subnet - The subnet. + Subnet *APIEntityReference `json:"subnet,omitempty"` + // Primary - Specifies the primary IP Configuration in case the network interface has more than one IP Configuration. + Primary *bool `json:"primary,omitempty"` + // PublicIPAddressConfiguration - The publicIPAddressConfiguration. + PublicIPAddressConfiguration *VirtualMachineScaleSetUpdatePublicIPAddressConfiguration `json:"publicIPAddressConfiguration,omitempty"` + // PrivateIPAddressVersion - Available from Api-Version 2017-03-30 onwards, it represents whether the specific ipconfiguration is IPv4 or IPv6. Default is taken as IPv4. Possible values are: 'IPv4' and 'IPv6'. Possible values include: 'IPv4', 'IPv6' + PrivateIPAddressVersion IPVersion `json:"privateIPAddressVersion,omitempty"` + // ApplicationGatewayBackendAddressPools - The application gateway backend address pools. + ApplicationGatewayBackendAddressPools *[]SubResource `json:"applicationGatewayBackendAddressPools,omitempty"` + // LoadBalancerBackendAddressPools - The load balancer backend address pools. + LoadBalancerBackendAddressPools *[]SubResource `json:"loadBalancerBackendAddressPools,omitempty"` + // LoadBalancerInboundNatPools - The load balancer inbound nat pools. + LoadBalancerInboundNatPools *[]SubResource `json:"loadBalancerInboundNatPools,omitempty"` +} + +// VirtualMachineScaleSetUpdateNetworkConfiguration describes a virtual machine scale set network profile's network +// configurations. +type VirtualMachineScaleSetUpdateNetworkConfiguration struct { + // ID - Resource Id + ID *string `json:"id,omitempty"` + // Name - The network configuration name. + Name *string `json:"name,omitempty"` + *VirtualMachineScaleSetUpdateNetworkConfigurationProperties `json:"properties,omitempty"` +} + +// VirtualMachineScaleSetUpdateNetworkConfigurationProperties describes a virtual machine scale set updatable network +// profile's IP configuration.Use this object for updating network profile's IP Configuration. +type VirtualMachineScaleSetUpdateNetworkConfigurationProperties struct { + // Primary - Whether this is a primary NIC on a virtual machine. + Primary *bool `json:"primary,omitempty"` + // EnableAcceleratedNetworking - Specifies whether the network interface is accelerated networking-enabled. + EnableAcceleratedNetworking *bool `json:"enableAcceleratedNetworking,omitempty"` + // NetworkSecurityGroup - The network security group. + NetworkSecurityGroup *SubResource `json:"networkSecurityGroup,omitempty"` + // DNSSettings - The dns settings to be applied on the network interfaces. + DNSSettings *VirtualMachineScaleSetNetworkConfigurationDNSSettings `json:"dnsSettings,omitempty"` + // IPConfigurations - The virtual machine scale set IP Configuration. + IPConfigurations *[]VirtualMachineScaleSetUpdateIPConfiguration `json:"ipConfigurations,omitempty"` + // EnableIPForwarding - Whether IP forwarding enabled on this NIC. 
+ EnableIPForwarding *bool `json:"enableIPForwarding,omitempty"` +} + +// VirtualMachineScaleSetUpdateNetworkProfile describes a virtual machine scale set network profile. +type VirtualMachineScaleSetUpdateNetworkProfile struct { + // NetworkInterfaceConfigurations - The list of network configurations. + NetworkInterfaceConfigurations *[]VirtualMachineScaleSetUpdateNetworkConfiguration `json:"networkInterfaceConfigurations,omitempty"` +} + +// VirtualMachineScaleSetUpdateOSDisk describes virtual machine scale set operating system disk Update Object. This +// should be used for Updating VMSS OS Disk. +type VirtualMachineScaleSetUpdateOSDisk struct { + // Caching - The caching type. Possible values include: 'CachingTypesNone', 'CachingTypesReadOnly', 'CachingTypesReadWrite' + Caching CachingTypes `json:"caching,omitempty"` + // WriteAcceleratorEnabled - Specifies whether writeAccelerator should be enabled or disabled on the disk. + WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty"` + // Image - The Source User Image VirtualHardDisk. This VirtualHardDisk will be copied before using it to attach to the Virtual Machine. If SourceImage is provided, the destination VirtualHardDisk should not exist. + Image *VirtualHardDisk `json:"image,omitempty"` + // VhdContainers - The list of virtual hard disk container uris. + VhdContainers *[]string `json:"vhdContainers,omitempty"` + // ManagedDisk - The managed disk parameters. + ManagedDisk *VirtualMachineScaleSetManagedDiskParameters `json:"managedDisk,omitempty"` +} + +// VirtualMachineScaleSetUpdateOSProfile describes a virtual machine scale set OS profile. +type VirtualMachineScaleSetUpdateOSProfile struct { + // CustomData - A base-64 encoded string of custom data. + CustomData *string `json:"customData,omitempty"` + // WindowsConfiguration - The Windows Configuration of the OS profile. + WindowsConfiguration *WindowsConfiguration `json:"windowsConfiguration,omitempty"` + // LinuxConfiguration - The Linux Configuration of the OS profile. + LinuxConfiguration *LinuxConfiguration `json:"linuxConfiguration,omitempty"` + // Secrets - The List of certificates for addition to the VM. + Secrets *[]VaultSecretGroup `json:"secrets,omitempty"` +} + +// VirtualMachineScaleSetUpdateProperties describes the properties of a Virtual Machine Scale Set. +type VirtualMachineScaleSetUpdateProperties struct { + // UpgradePolicy - The upgrade policy. + UpgradePolicy *UpgradePolicy `json:"upgradePolicy,omitempty"` + // VirtualMachineProfile - The virtual machine profile. + VirtualMachineProfile *VirtualMachineScaleSetUpdateVMProfile `json:"virtualMachineProfile,omitempty"` + // Overprovision - Specifies whether the Virtual Machine Scale Set should be overprovisioned. + Overprovision *bool `json:"overprovision,omitempty"` + // SinglePlacementGroup - When true this limits the scale set to a single placement group, of max size 100 virtual machines. + SinglePlacementGroup *bool `json:"singlePlacementGroup,omitempty"` +} + +// VirtualMachineScaleSetUpdatePublicIPAddressConfiguration describes a virtual machines scale set IP Configuration's +// PublicIPAddress configuration +type VirtualMachineScaleSetUpdatePublicIPAddressConfiguration struct { + // Name - The publicIP address configuration name. 
+ Name *string `json:"name,omitempty"` + *VirtualMachineScaleSetUpdatePublicIPAddressConfigurationProperties `json:"properties,omitempty"` +} + +// VirtualMachineScaleSetUpdatePublicIPAddressConfigurationProperties describes a virtual machines scale set IP +// Configuration's PublicIPAddress configuration +type VirtualMachineScaleSetUpdatePublicIPAddressConfigurationProperties struct { + // IdleTimeoutInMinutes - The idle timeout of the public IP address. + IdleTimeoutInMinutes *int32 `json:"idleTimeoutInMinutes,omitempty"` + // DNSSettings - The dns settings to be applied on the publicIP addresses . + DNSSettings *VirtualMachineScaleSetPublicIPAddressConfigurationDNSSettings `json:"dnsSettings,omitempty"` +} + +// VirtualMachineScaleSetUpdateStorageProfile describes a virtual machine scale set storage profile. +type VirtualMachineScaleSetUpdateStorageProfile struct { + // ImageReference - The image reference. + ImageReference *ImageReference `json:"imageReference,omitempty"` + // OsDisk - The OS disk. + OsDisk *VirtualMachineScaleSetUpdateOSDisk `json:"osDisk,omitempty"` + // DataDisks - The data disks. + DataDisks *[]VirtualMachineScaleSetDataDisk `json:"dataDisks,omitempty"` +} + +// VirtualMachineScaleSetUpdateVMProfile describes a virtual machine scale set virtual machine profile. +type VirtualMachineScaleSetUpdateVMProfile struct { + // OsProfile - The virtual machine scale set OS profile. + OsProfile *VirtualMachineScaleSetUpdateOSProfile `json:"osProfile,omitempty"` + // StorageProfile - The virtual machine scale set storage profile. + StorageProfile *VirtualMachineScaleSetUpdateStorageProfile `json:"storageProfile,omitempty"` + // NetworkProfile - The virtual machine scale set network profile. + NetworkProfile *VirtualMachineScaleSetUpdateNetworkProfile `json:"networkProfile,omitempty"` + // DiagnosticsProfile - The virtual machine scale set diagnostics profile. + DiagnosticsProfile *DiagnosticsProfile `json:"diagnosticsProfile,omitempty"` + // ExtensionProfile - The virtual machine scale set extension profile. + ExtensionProfile *VirtualMachineScaleSetExtensionProfile `json:"extensionProfile,omitempty"` + // LicenseType - The license type, which is for bring your own license scenario. + LicenseType *string `json:"licenseType,omitempty"` +} + +// VirtualMachineScaleSetVM describes a virtual machine scale set virtual machine. +type VirtualMachineScaleSetVM struct { + autorest.Response `json:"-"` + // ID - Resource Id + ID *string `json:"id,omitempty"` + // Name - Resource name + Name *string `json:"name,omitempty"` + // Type - Resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` + // Tags - Resource tags + Tags *map[string]*string `json:"tags,omitempty"` + // InstanceID - The virtual machine instance ID. + InstanceID *string `json:"instanceId,omitempty"` + // Sku - The virtual machine SKU. + Sku *Sku `json:"sku,omitempty"` + *VirtualMachineScaleSetVMProperties `json:"properties,omitempty"` + // Plan - Specifies information about the marketplace image used to create the virtual machine. This element is only used for marketplace images. Before you can use a marketplace image from an API, you must enable the image for programmatic use. In the Azure portal, find the marketplace image that you want to use and then click **Want to deploy programmatically, Get Started ->**. Enter any required information and then click **Save**. 
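
// Illustrative sketch (an editor's addition, not part of the vendored file):
// because every model field is a pointer, a partial update is expressed by
// leaving fields nil. A capacity-only VirtualMachineScaleSetUpdate built with
// the go-autorest "to" helpers, assuming the package's Sku model exposes
// Name, Tier and Capacity; the sku values here are hypothetical:
//
//	update := compute.VirtualMachineScaleSetUpdate{
//		Sku: &compute.Sku{
//			Name:     to.StringPtr("Standard_DS1_v2"),
//			Tier:     to.StringPtr("Standard"),
//			Capacity: to.Int64Ptr(5), // only the fields set here are sent
//		},
//	}
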
+ Plan *Plan `json:"plan,omitempty"` + // Resources - The virtual machine child extension resources. + Resources *[]VirtualMachineExtension `json:"resources,omitempty"` +} + +// VirtualMachineScaleSetVMExtensionsSummary extensions summary for virtual machines of a virtual machine scale set. +type VirtualMachineScaleSetVMExtensionsSummary struct { + // Name - The extension name. + Name *string `json:"name,omitempty"` + // StatusesSummary - The extensions information. + StatusesSummary *[]VirtualMachineStatusCodeCount `json:"statusesSummary,omitempty"` +} + +// VirtualMachineScaleSetVMInstanceIDs specifies a list of virtual machine instance IDs from the VM scale set. +type VirtualMachineScaleSetVMInstanceIDs struct { + // InstanceIds - The virtual machine scale set instance ids. Omitting the virtual machine scale set instance ids will result in the operation being performed on all virtual machines in the virtual machine scale set. + InstanceIds *[]string `json:"instanceIds,omitempty"` +} + +// VirtualMachineScaleSetVMInstanceRequiredIDs specifies a list of virtual machine instance IDs from the VM scale set. +type VirtualMachineScaleSetVMInstanceRequiredIDs struct { + // InstanceIds - The virtual machine scale set instance ids. + InstanceIds *[]string `json:"instanceIds,omitempty"` +} + +// VirtualMachineScaleSetVMInstanceView the instance view of a virtual machine scale set VM. +type VirtualMachineScaleSetVMInstanceView struct { + autorest.Response `json:"-"` + // PlatformUpdateDomain - The Update Domain count. + PlatformUpdateDomain *int32 `json:"platformUpdateDomain,omitempty"` + // PlatformFaultDomain - The Fault Domain count. + PlatformFaultDomain *int32 `json:"platformFaultDomain,omitempty"` + // RdpThumbPrint - The Remote desktop certificate thumbprint. + RdpThumbPrint *string `json:"rdpThumbPrint,omitempty"` + // VMAgent - The VM Agent running on the virtual machine. + VMAgent *VirtualMachineAgentInstanceView `json:"vmAgent,omitempty"` + // Disks - The disks information. + Disks *[]DiskInstanceView `json:"disks,omitempty"` + // Extensions - The extensions information. + Extensions *[]VirtualMachineExtensionInstanceView `json:"extensions,omitempty"` + // VMHealth - The health status for the VM. + VMHealth *VirtualMachineHealthStatus `json:"vmHealth,omitempty"` + // BootDiagnostics - Boot Diagnostics is a debugging feature which allows you to view Console Output and Screenshot to diagnose VM status.
For Linux Virtual Machines, you can easily view the output of your console log. For both Windows and Linux virtual machines, Azure also enables you to see a screenshot of the VM from the hypervisor. + BootDiagnostics *BootDiagnosticsInstanceView `json:"bootDiagnostics,omitempty"` + // Statuses - The resource status information. + Statuses *[]InstanceViewStatus `json:"statuses,omitempty"` + // PlacementGroupID - The placement group in which the VM is running. If the VM is deallocated it will not have a placementGroupId. + PlacementGroupID *string `json:"placementGroupId,omitempty"` +} + +// VirtualMachineScaleSetVMListResult the List Virtual Machine Scale Set VMs operation response. +type VirtualMachineScaleSetVMListResult struct { + autorest.Response `json:"-"` + // Value - The list of virtual machine scale sets VMs. + Value *[]VirtualMachineScaleSetVM `json:"value,omitempty"` + // NextLink - The uri to fetch the next page of Virtual Machine Scale Set VMs. Call ListNext() with this to fetch the next page of VMSS VMs + NextLink *string `json:"nextLink,omitempty"` +} + +// VirtualMachineScaleSetVMListResultIterator provides access to a complete listing of VirtualMachineScaleSetVM values. +type VirtualMachineScaleSetVMListResultIterator struct { + i int + page VirtualMachineScaleSetVMListResultPage +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *VirtualMachineScaleSetVMListResultIterator) Next() error { + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err := iter.page.Next() + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter VirtualMachineScaleSetVMListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter VirtualMachineScaleSetVMListResultIterator) Response() VirtualMachineScaleSetVMListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter VirtualMachineScaleSetVMListResultIterator) Value() VirtualMachineScaleSetVM { + if !iter.page.NotDone() { + return VirtualMachineScaleSetVM{} + } + return iter.page.Values()[iter.i] +} + +// IsEmpty returns true if the ListResult contains no values. +func (vmssvlr VirtualMachineScaleSetVMListResult) IsEmpty() bool { + return vmssvlr.Value == nil || len(*vmssvlr.Value) == 0 +} + +// virtualMachineScaleSetVMListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (vmssvlr VirtualMachineScaleSetVMListResult) virtualMachineScaleSetVMListResultPreparer() (*http.Request, error) { + if vmssvlr.NextLink == nil || len(to.String(vmssvlr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(vmssvlr.NextLink))) +} + +// VirtualMachineScaleSetVMListResultPage contains a page of VirtualMachineScaleSetVM values. +type VirtualMachineScaleSetVMListResultPage struct { + fn func(VirtualMachineScaleSetVMListResult) (VirtualMachineScaleSetVMListResult, error) + vmssvlr VirtualMachineScaleSetVMListResult +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned.
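
// Illustrative sketch (an editor's addition, not part of the vendored file):
// per the VirtualMachineScaleSetVMInstanceIDs comment above, omitting
// InstanceIds targets every VM in the set, while supplying ids narrows the
// operation. The pointer-to-slice shape is the only subtlety:
//
//	all := compute.VirtualMachineScaleSetVMInstanceIDs{} // nil InstanceIds: whole scale set
//	some := compute.VirtualMachineScaleSetVMInstanceIDs{
//		InstanceIds: &[]string{"0", "3"}, // only instances 0 and 3
//	}
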
+func (page *VirtualMachineScaleSetVMListResultPage) Next() error { + next, err := page.fn(page.vmssvlr) + if err != nil { + return err + } + page.vmssvlr = next + return nil +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page VirtualMachineScaleSetVMListResultPage) NotDone() bool { + return !page.vmssvlr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page VirtualMachineScaleSetVMListResultPage) Response() VirtualMachineScaleSetVMListResult { + return page.vmssvlr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page VirtualMachineScaleSetVMListResultPage) Values() []VirtualMachineScaleSetVM { + if page.vmssvlr.IsEmpty() { + return nil + } + return *page.vmssvlr.Value +} + +// VirtualMachineScaleSetVMProfile describes a virtual machine scale set virtual machine profile. +type VirtualMachineScaleSetVMProfile struct { + // OsProfile - Specifies the operating system settings for the virtual machines in the scale set. + OsProfile *VirtualMachineScaleSetOSProfile `json:"osProfile,omitempty"` + // StorageProfile - Specifies the storage settings for the virtual machine disks. + StorageProfile *VirtualMachineScaleSetStorageProfile `json:"storageProfile,omitempty"` + // NetworkProfile - Specifies properties of the network interfaces of the virtual machines in the scale set. + NetworkProfile *VirtualMachineScaleSetNetworkProfile `json:"networkProfile,omitempty"` + // DiagnosticsProfile - Specifies the boot diagnostic settings state.

    Minimum api-version: 2015-06-15. + DiagnosticsProfile *DiagnosticsProfile `json:"diagnosticsProfile,omitempty"` + // ExtensionProfile - Specifies a collection of settings for extensions installed on virtual machines in the scale set. + ExtensionProfile *VirtualMachineScaleSetExtensionProfile `json:"extensionProfile,omitempty"` + // LicenseType - Specifies that the image or disk that is being used was licensed on-premises. This element is only used for images that contain the Windows Server operating system.

    Possible values are:

    Windows_Client

    Windows_Server

    If this element is included in a request for an update, the value must match the initial value. This value cannot be updated.

    For more information, see [Azure Hybrid Use Benefit for Windows Server](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-hybrid-use-benefit-licensing?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json)

    Minimum api-version: 2015-06-15 + LicenseType *string `json:"licenseType,omitempty"` + // Priority - Specifies the priority for the virtual machines in the scale set.

    Minimum api-version: 2017-10-30-preview. Possible values include: 'Regular', 'Low' + Priority VirtualMachinePriorityTypes `json:"priority,omitempty"` +} + +// VirtualMachineScaleSetVMProperties describes the properties of a virtual machine scale set virtual machine. +type VirtualMachineScaleSetVMProperties struct { + // LatestModelApplied - Specifies whether the latest model has been applied to the virtual machine. + LatestModelApplied *bool `json:"latestModelApplied,omitempty"` + // VMID - Azure VM unique ID. + VMID *string `json:"vmId,omitempty"` + // InstanceView - The virtual machine instance view. + InstanceView *VirtualMachineInstanceView `json:"instanceView,omitempty"` + // HardwareProfile - Specifies the hardware settings for the virtual machine. + HardwareProfile *HardwareProfile `json:"hardwareProfile,omitempty"` + // StorageProfile - Specifies the storage settings for the virtual machine disks. + StorageProfile *StorageProfile `json:"storageProfile,omitempty"` + // OsProfile - Specifies the operating system settings for the virtual machine. + OsProfile *OSProfile `json:"osProfile,omitempty"` + // NetworkProfile - Specifies the network interfaces of the virtual machine. + NetworkProfile *NetworkProfile `json:"networkProfile,omitempty"` + // DiagnosticsProfile - Specifies the boot diagnostic settings state.

    Minimum api-version: 2015-06-15. + DiagnosticsProfile *DiagnosticsProfile `json:"diagnosticsProfile,omitempty"` + // AvailabilitySet - Specifies information about the availability set that the virtual machine should be assigned to. Virtual machines specified in the same availability set are allocated to different nodes to maximize availability. For more information about availability sets, see [Manage the availability of virtual machines](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-manage-availability?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json).

    For more information on Azure planned maintenance, see [Planned maintenance for virtual machines in Azure](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-planned-maintenance?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json)

    Currently, a VM can only be added to an availability set at creation time. An existing VM cannot be added to an availability set. + AvailabilitySet *SubResource `json:"availabilitySet,omitempty"` + // ProvisioningState - The provisioning state, which only appears in the response. + ProvisioningState *string `json:"provisioningState,omitempty"` + // LicenseType - Specifies that the image or disk that is being used was licensed on-premises. This element is only used for images that contain the Windows Server operating system.

    Possible values are:

    Windows_Client

    Windows_Server

    If this element is included in a request for an update, the value must match the initial value. This value cannot be updated.

    For more information, see [Azure Hybrid Use Benefit for Windows Server](https://docs.microsoft.com/azure/virtual-machines/virtual-machines-windows-hybrid-use-benefit-licensing?toc=%2fazure%2fvirtual-machines%2fwindows%2ftoc.json)

    Minimum api-version: 2015-06-15 + LicenseType *string `json:"licenseType,omitempty"` +} + +// VirtualMachineScaleSetVMsDeallocateFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type VirtualMachineScaleSetVMsDeallocateFuture struct { + azure.Future + req *http.Request +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future VirtualMachineScaleSetVMsDeallocateFuture) Result(client VirtualMachineScaleSetVMsClient) (osr OperationStatusResponse, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + return + } + if !done { + return osr, autorest.NewError("compute.VirtualMachineScaleSetVMsDeallocateFuture", "Result", "asynchronous operation has not completed") + } + if future.PollingMethod() == azure.PollingLocation { + osr, err = client.DeallocateResponder(future.Response()) + return + } + var resp *http.Response + resp, err = autorest.SendWithSender(client, autorest.ChangeToGet(future.req), + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if err != nil { + return + } + osr, err = client.DeallocateResponder(resp) + return +} + +// VirtualMachineScaleSetVMsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type VirtualMachineScaleSetVMsDeleteFuture struct { + azure.Future + req *http.Request +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future VirtualMachineScaleSetVMsDeleteFuture) Result(client VirtualMachineScaleSetVMsClient) (osr OperationStatusResponse, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + return + } + if !done { + return osr, autorest.NewError("compute.VirtualMachineScaleSetVMsDeleteFuture", "Result", "asynchronous operation has not completed") + } + if future.PollingMethod() == azure.PollingLocation { + osr, err = client.DeleteResponder(future.Response()) + return + } + var resp *http.Response + resp, err = autorest.SendWithSender(client, autorest.ChangeToGet(future.req), + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if err != nil { + return + } + osr, err = client.DeleteResponder(resp) + return +} + +// VirtualMachineScaleSetVMsPowerOffFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type VirtualMachineScaleSetVMsPowerOffFuture struct { + azure.Future + req *http.Request +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
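// Illustrative polling sketch (editor's addition): block until a deallocate
// future completes, then fetch its typed result. The typed client embeds
// autorest.Client and so satisfies autorest.Sender, which is why it can be
// passed to Done directly, exactly as the generated Result methods above do.
// Assumes a "time" import; the poll interval is arbitrary.
func waitForDeallocate(client VirtualMachineScaleSetVMsClient, future VirtualMachineScaleSetVMsDeallocateFuture) (OperationStatusResponse, error) {
	for {
		done, err := future.Done(client)
		if err != nil {
			return OperationStatusResponse{}, err
		}
		if done {
			// Result re-checks Done and then dispatches to the responder.
			return future.Result(client)
		}
		time.Sleep(10 * time.Second)
	}
}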
+func (future VirtualMachineScaleSetVMsPowerOffFuture) Result(client VirtualMachineScaleSetVMsClient) (osr OperationStatusResponse, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + return + } + if !done { + return osr, autorest.NewError("compute.VirtualMachineScaleSetVMsPowerOffFuture", "Result", "asynchronous operation has not completed") + } + if future.PollingMethod() == azure.PollingLocation { + osr, err = client.PowerOffResponder(future.Response()) + return + } + var resp *http.Response + resp, err = autorest.SendWithSender(client, autorest.ChangeToGet(future.req), + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if err != nil { + return + } + osr, err = client.PowerOffResponder(resp) + return +} + +// VirtualMachineScaleSetVMsReimageAllFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type VirtualMachineScaleSetVMsReimageAllFuture struct { + azure.Future + req *http.Request +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future VirtualMachineScaleSetVMsReimageAllFuture) Result(client VirtualMachineScaleSetVMsClient) (osr OperationStatusResponse, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + return + } + if !done { + return osr, autorest.NewError("compute.VirtualMachineScaleSetVMsReimageAllFuture", "Result", "asynchronous operation has not completed") + } + if future.PollingMethod() == azure.PollingLocation { + osr, err = client.ReimageAllResponder(future.Response()) + return + } + var resp *http.Response + resp, err = autorest.SendWithSender(client, autorest.ChangeToGet(future.req), + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if err != nil { + return + } + osr, err = client.ReimageAllResponder(resp) + return +} + +// VirtualMachineScaleSetVMsReimageFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type VirtualMachineScaleSetVMsReimageFuture struct { + azure.Future + req *http.Request +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future VirtualMachineScaleSetVMsReimageFuture) Result(client VirtualMachineScaleSetVMsClient) (osr OperationStatusResponse, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + return + } + if !done { + return osr, autorest.NewError("compute.VirtualMachineScaleSetVMsReimageFuture", "Result", "asynchronous operation has not completed") + } + if future.PollingMethod() == azure.PollingLocation { + osr, err = client.ReimageResponder(future.Response()) + return + } + var resp *http.Response + resp, err = autorest.SendWithSender(client, autorest.ChangeToGet(future.req), + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if err != nil { + return + } + osr, err = client.ReimageResponder(resp) + return +} + +// VirtualMachineScaleSetVMsRestartFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type VirtualMachineScaleSetVMsRestartFuture struct { + azure.Future + req *http.Request +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
+func (future VirtualMachineScaleSetVMsRestartFuture) Result(client VirtualMachineScaleSetVMsClient) (osr OperationStatusResponse, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + return + } + if !done { + return osr, autorest.NewError("compute.VirtualMachineScaleSetVMsRestartFuture", "Result", "asynchronous operation has not completed") + } + if future.PollingMethod() == azure.PollingLocation { + osr, err = client.RestartResponder(future.Response()) + return + } + var resp *http.Response + resp, err = autorest.SendWithSender(client, autorest.ChangeToGet(future.req), + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if err != nil { + return + } + osr, err = client.RestartResponder(resp) + return +} + +// VirtualMachineScaleSetVMsStartFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type VirtualMachineScaleSetVMsStartFuture struct { + azure.Future + req *http.Request +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future VirtualMachineScaleSetVMsStartFuture) Result(client VirtualMachineScaleSetVMsClient) (osr OperationStatusResponse, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + return + } + if !done { + return osr, autorest.NewError("compute.VirtualMachineScaleSetVMsStartFuture", "Result", "asynchronous operation has not completed") + } + if future.PollingMethod() == azure.PollingLocation { + osr, err = client.StartResponder(future.Response()) + return + } + var resp *http.Response + resp, err = autorest.SendWithSender(client, autorest.ChangeToGet(future.req), + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if err != nil { + return + } + osr, err = client.StartResponder(resp) + return +} + +// VirtualMachineScaleSetVMsUpdateFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type VirtualMachineScaleSetVMsUpdateFuture struct { + azure.Future + req *http.Request +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future VirtualMachineScaleSetVMsUpdateFuture) Result(client VirtualMachineScaleSetVMsClient) (vmssv VirtualMachineScaleSetVM, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + return + } + if !done { + return vmssv, autorest.NewError("compute.VirtualMachineScaleSetVMsUpdateFuture", "Result", "asynchronous operation has not completed") + } + if future.PollingMethod() == azure.PollingLocation { + vmssv, err = client.UpdateResponder(future.Response()) + return + } + var resp *http.Response + resp, err = autorest.SendWithSender(client, autorest.ChangeToGet(future.req), + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if err != nil { + return + } + vmssv, err = client.UpdateResponder(resp) + return +} + +// VirtualMachinesCaptureFuture an abstraction for monitoring and retrieving the results of a long-running operation. +type VirtualMachinesCaptureFuture struct { + azure.Future + req *http.Request +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
+func (future VirtualMachinesCaptureFuture) Result(client VirtualMachinesClient) (vmcr VirtualMachineCaptureResult, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + return + } + if !done { + return vmcr, autorest.NewError("compute.VirtualMachinesCaptureFuture", "Result", "asynchronous operation has not completed") + } + if future.PollingMethod() == azure.PollingLocation { + vmcr, err = client.CaptureResponder(future.Response()) + return + } + var resp *http.Response + resp, err = autorest.SendWithSender(client, autorest.ChangeToGet(future.req), + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if err != nil { + return + } + vmcr, err = client.CaptureResponder(resp) + return +} + +// VirtualMachinesConvertToManagedDisksFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type VirtualMachinesConvertToManagedDisksFuture struct { + azure.Future + req *http.Request +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future VirtualMachinesConvertToManagedDisksFuture) Result(client VirtualMachinesClient) (osr OperationStatusResponse, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + return + } + if !done { + return osr, autorest.NewError("compute.VirtualMachinesConvertToManagedDisksFuture", "Result", "asynchronous operation has not completed") + } + if future.PollingMethod() == azure.PollingLocation { + osr, err = client.ConvertToManagedDisksResponder(future.Response()) + return + } + var resp *http.Response + resp, err = autorest.SendWithSender(client, autorest.ChangeToGet(future.req), + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if err != nil { + return + } + osr, err = client.ConvertToManagedDisksResponder(resp) + return +} + +// VirtualMachinesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type VirtualMachinesCreateOrUpdateFuture struct { + azure.Future + req *http.Request +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future VirtualMachinesCreateOrUpdateFuture) Result(client VirtualMachinesClient) (VM VirtualMachine, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + return + } + if !done { + return VM, autorest.NewError("compute.VirtualMachinesCreateOrUpdateFuture", "Result", "asynchronous operation has not completed") + } + if future.PollingMethod() == azure.PollingLocation { + VM, err = client.CreateOrUpdateResponder(future.Response()) + return + } + var resp *http.Response + resp, err = autorest.SendWithSender(client, autorest.ChangeToGet(future.req), + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if err != nil { + return + } + VM, err = client.CreateOrUpdateResponder(resp) + return +} + +// VirtualMachinesDeallocateFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type VirtualMachinesDeallocateFuture struct { + azure.Future + req *http.Request +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
+func (future VirtualMachinesDeallocateFuture) Result(client VirtualMachinesClient) (osr OperationStatusResponse, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + return + } + if !done { + return osr, autorest.NewError("compute.VirtualMachinesDeallocateFuture", "Result", "asynchronous operation has not completed") + } + if future.PollingMethod() == azure.PollingLocation { + osr, err = client.DeallocateResponder(future.Response()) + return + } + var resp *http.Response + resp, err = autorest.SendWithSender(client, autorest.ChangeToGet(future.req), + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if err != nil { + return + } + osr, err = client.DeallocateResponder(resp) + return +} + +// VirtualMachinesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running operation. +type VirtualMachinesDeleteFuture struct { + azure.Future + req *http.Request +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future VirtualMachinesDeleteFuture) Result(client VirtualMachinesClient) (osr OperationStatusResponse, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + return + } + if !done { + return osr, autorest.NewError("compute.VirtualMachinesDeleteFuture", "Result", "asynchronous operation has not completed") + } + if future.PollingMethod() == azure.PollingLocation { + osr, err = client.DeleteResponder(future.Response()) + return + } + var resp *http.Response + resp, err = autorest.SendWithSender(client, autorest.ChangeToGet(future.req), + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if err != nil { + return + } + osr, err = client.DeleteResponder(resp) + return +} + +// VirtualMachineSize describes the properties of a VM size. +type VirtualMachineSize struct { + // Name - The name of the virtual machine size. + Name *string `json:"name,omitempty"` + // NumberOfCores - The number of cores supported by the virtual machine size. + NumberOfCores *int32 `json:"numberOfCores,omitempty"` + // OsDiskSizeInMB - The OS disk size, in MB, allowed by the virtual machine size. + OsDiskSizeInMB *int32 `json:"osDiskSizeInMB,omitempty"` + // ResourceDiskSizeInMB - The resource disk size, in MB, allowed by the virtual machine size. + ResourceDiskSizeInMB *int32 `json:"resourceDiskSizeInMB,omitempty"` + // MemoryInMB - The amount of memory, in MB, supported by the virtual machine size. + MemoryInMB *int32 `json:"memoryInMB,omitempty"` + // MaxDataDiskCount - The maximum number of data disks that can be attached to the virtual machine size. + MaxDataDiskCount *int32 `json:"maxDataDiskCount,omitempty"` +} + +// VirtualMachineSizeListResult the List Virtual Machine operation response. +type VirtualMachineSizeListResult struct { + autorest.Response `json:"-"` + // Value - The list of virtual machine sizes. + Value *[]VirtualMachineSize `json:"value,omitempty"` +} + +// VirtualMachinesPerformMaintenanceFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type VirtualMachinesPerformMaintenanceFuture struct { + azure.Future + req *http.Request +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
+func (future VirtualMachinesPerformMaintenanceFuture) Result(client VirtualMachinesClient) (osr OperationStatusResponse, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + return + } + if !done { + return osr, autorest.NewError("compute.VirtualMachinesPerformMaintenanceFuture", "Result", "asynchronous operation has not completed") + } + if future.PollingMethod() == azure.PollingLocation { + osr, err = client.PerformMaintenanceResponder(future.Response()) + return + } + var resp *http.Response + resp, err = autorest.SendWithSender(client, autorest.ChangeToGet(future.req), + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if err != nil { + return + } + osr, err = client.PerformMaintenanceResponder(resp) + return +} + +// VirtualMachinesPowerOffFuture an abstraction for monitoring and retrieving the results of a long-running operation. +type VirtualMachinesPowerOffFuture struct { + azure.Future + req *http.Request +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future VirtualMachinesPowerOffFuture) Result(client VirtualMachinesClient) (osr OperationStatusResponse, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + return + } + if !done { + return osr, autorest.NewError("compute.VirtualMachinesPowerOffFuture", "Result", "asynchronous operation has not completed") + } + if future.PollingMethod() == azure.PollingLocation { + osr, err = client.PowerOffResponder(future.Response()) + return + } + var resp *http.Response + resp, err = autorest.SendWithSender(client, autorest.ChangeToGet(future.req), + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if err != nil { + return + } + osr, err = client.PowerOffResponder(resp) + return +} + +// VirtualMachinesRedeployFuture an abstraction for monitoring and retrieving the results of a long-running operation. +type VirtualMachinesRedeployFuture struct { + azure.Future + req *http.Request +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future VirtualMachinesRedeployFuture) Result(client VirtualMachinesClient) (osr OperationStatusResponse, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + return + } + if !done { + return osr, autorest.NewError("compute.VirtualMachinesRedeployFuture", "Result", "asynchronous operation has not completed") + } + if future.PollingMethod() == azure.PollingLocation { + osr, err = client.RedeployResponder(future.Response()) + return + } + var resp *http.Response + resp, err = autorest.SendWithSender(client, autorest.ChangeToGet(future.req), + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if err != nil { + return + } + osr, err = client.RedeployResponder(resp) + return +} + +// VirtualMachinesRestartFuture an abstraction for monitoring and retrieving the results of a long-running operation. +type VirtualMachinesRestartFuture struct { + azure.Future + req *http.Request +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
+func (future VirtualMachinesRestartFuture) Result(client VirtualMachinesClient) (osr OperationStatusResponse, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + return + } + if !done { + return osr, autorest.NewError("compute.VirtualMachinesRestartFuture", "Result", "asynchronous operation has not completed") + } + if future.PollingMethod() == azure.PollingLocation { + osr, err = client.RestartResponder(future.Response()) + return + } + var resp *http.Response + resp, err = autorest.SendWithSender(client, autorest.ChangeToGet(future.req), + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if err != nil { + return + } + osr, err = client.RestartResponder(resp) + return +} + +// VirtualMachinesRunCommandFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type VirtualMachinesRunCommandFuture struct { + azure.Future + req *http.Request +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future VirtualMachinesRunCommandFuture) Result(client VirtualMachinesClient) (rcr RunCommandResult, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + return + } + if !done { + return rcr, autorest.NewError("compute.VirtualMachinesRunCommandFuture", "Result", "asynchronous operation has not completed") + } + if future.PollingMethod() == azure.PollingLocation { + rcr, err = client.RunCommandResponder(future.Response()) + return + } + var resp *http.Response + resp, err = autorest.SendWithSender(client, autorest.ChangeToGet(future.req), + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if err != nil { + return + } + rcr, err = client.RunCommandResponder(resp) + return +} + +// VirtualMachinesStartFuture an abstraction for monitoring and retrieving the results of a long-running operation. +type VirtualMachinesStartFuture struct { + azure.Future + req *http.Request +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future VirtualMachinesStartFuture) Result(client VirtualMachinesClient) (osr OperationStatusResponse, err error) { + var done bool + done, err = future.Done(client) + if err != nil { + return + } + if !done { + return osr, autorest.NewError("compute.VirtualMachinesStartFuture", "Result", "asynchronous operation has not completed") + } + if future.PollingMethod() == azure.PollingLocation { + osr, err = client.StartResponder(future.Response()) + return + } + var resp *http.Response + resp, err = autorest.SendWithSender(client, autorest.ChangeToGet(future.req), + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if err != nil { + return + } + osr, err = client.StartResponder(resp) + return +} + +// VirtualMachineStatusCodeCount the status code and count of the virtual machine scale set instance view status +// summary. +type VirtualMachineStatusCodeCount struct { + // Code - The instance view status code. + Code *string `json:"code,omitempty"` + // Count - The number of instances having a particular status code. + Count *int32 `json:"count,omitempty"` +} + +// WindowsConfiguration specifies Windows operating system settings on the virtual machine. 
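// Illustrative construction sketch (editor's addition): a WindowsConfiguration
// (declared just below) that provisions the VM agent and exposes an HTTPS
// WinRM listener backed by a Key Vault certificate. Uses the autorest/to
// pointer helpers this file already imports; the vault URL is a placeholder,
// and HTTPS refers to the generated ProtocolTypes constant.
func exampleWindowsConfiguration() *WindowsConfiguration {
	return &WindowsConfiguration{
		ProvisionVMAgent:       to.BoolPtr(true),
		EnableAutomaticUpdates: to.BoolPtr(true),
		TimeZone:               to.StringPtr("Pacific Standard Time"),
		WinRM: &WinRMConfiguration{
			Listeners: &[]WinRMListener{{
				Protocol:       HTTPS,
				CertificateURL: to.StringPtr("https://myvault.vault.azure.net/secrets/winrm-cert"),
			}},
		},
	}
}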
+type WindowsConfiguration struct { + // ProvisionVMAgent - Indicates whether virtual machine agent should be provisioned on the virtual machine.

    When this property is not specified in the request body, default behavior is to set it to true. This will ensure that VM Agent is installed on the VM so that extensions can be added to the VM later. + ProvisionVMAgent *bool `json:"provisionVMAgent,omitempty"` + // EnableAutomaticUpdates - Indicates whether virtual machine is enabled for automatic updates. + EnableAutomaticUpdates *bool `json:"enableAutomaticUpdates,omitempty"` + // TimeZone - Specifies the time zone of the virtual machine. e.g. "Pacific Standard Time" + TimeZone *string `json:"timeZone,omitempty"` + // AdditionalUnattendContent - Specifies additional base-64 encoded XML formatted information that can be included in the Unattend.xml file, which is used by Windows Setup. + AdditionalUnattendContent *[]AdditionalUnattendContent `json:"additionalUnattendContent,omitempty"` + // WinRM - Specifies the Windows Remote Management listeners. This enables remote Windows PowerShell. + WinRM *WinRMConfiguration `json:"winRM,omitempty"` +} + +// WinRMConfiguration describes Windows Remote Management configuration of the VM +type WinRMConfiguration struct { + // Listeners - The list of Windows Remote Management listeners + Listeners *[]WinRMListener `json:"listeners,omitempty"` +} + +// WinRMListener describes Protocol and thumbprint of Windows Remote Management listener +type WinRMListener struct { + // Protocol - Specifies the protocol of listener.

    Possible values are:
    **http**

    **https**. Possible values include: 'HTTP', 'HTTPS' + Protocol ProtocolTypes `json:"protocol,omitempty"` + // CertificateURL - This is the URL of a certificate that has been uploaded to Key Vault as a secret. For adding a secret to the Key Vault, see [Add a key or secret to the key vault](https://docs.microsoft.com/azure/key-vault/key-vault-get-started/#add). In this case, your certificate needs to be the Base64 encoding of the following JSON Object which is encoded in UTF-8:

    {
    "data":"",
    "dataType":"pfx",
    "password":""
    } + CertificateURL *string `json:"certificateUrl,omitempty"` +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/resourceskus.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/resourceskus.go new file mode 100644 index 000000000000..de32a3df699d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/resourceskus.go @@ -0,0 +1,130 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// ResourceSkusClient is the compute Client +type ResourceSkusClient struct { + BaseClient +} + +// NewResourceSkusClient creates an instance of the ResourceSkusClient client. +func NewResourceSkusClient(subscriptionID string) ResourceSkusClient { + return NewResourceSkusClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewResourceSkusClientWithBaseURI creates an instance of the ResourceSkusClient client. +func NewResourceSkusClientWithBaseURI(baseURI string, subscriptionID string) ResourceSkusClient { + return ResourceSkusClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// List gets the list of Microsoft.Compute SKUs available for your Subscription. +func (client ResourceSkusClient) List(ctx context.Context) (result ResourceSkusResultPage, err error) { + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ResourceSkusClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.rsr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.ResourceSkusClient", "List", resp, "Failure sending request") + return + } + + result.rsr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ResourceSkusClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
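// Illustrative usage sketch (editor's addition): enumerate every compute SKU
// visible to a subscription via ListComplete (defined below), which walks the
// NextLink paging automatically. Assumes the client's Authorizer is already
// configured and that "fmt" is imported; the iterator follows the same
// NotDone/Value/Next contract as the other generated iterators.
func printResourceSkus(ctx context.Context, subscriptionID string) error {
	client := NewResourceSkusClient(subscriptionID)
	iter, err := client.ListComplete(ctx)
	if err != nil {
		return err
	}
	for iter.NotDone() {
		if sku := iter.Value(); sku.Name != nil {
			fmt.Println(*sku.Name)
		}
		if err := iter.Next(); err != nil {
			return err
		}
	}
	return nil
}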
+func (client ResourceSkusClient) ListPreparer(ctx context.Context) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-09-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/skus", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client ResourceSkusClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client ResourceSkusClient) ListResponder(resp *http.Response) (result ResourceSkusResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. +func (client ResourceSkusClient) listNextResults(lastResults ResourceSkusResult) (result ResourceSkusResult, err error) { + req, err := lastResults.resourceSkusResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.ResourceSkusClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.ResourceSkusClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.ResourceSkusClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client ResourceSkusClient) ListComplete(ctx context.Context) (result ResourceSkusResultIterator, err error) { + result.page, err = client.List(ctx) + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/snapshots.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/snapshots.go new file mode 100644 index 000000000000..e5e1a9dd85a4 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/snapshots.go @@ -0,0 +1,677 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "net/http" +) + +// SnapshotsClient is the compute Client +type SnapshotsClient struct { + BaseClient +} + +// NewSnapshotsClient creates an instance of the SnapshotsClient client. +func NewSnapshotsClient(subscriptionID string) SnapshotsClient { + return NewSnapshotsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewSnapshotsClientWithBaseURI creates an instance of the SnapshotsClient client. +func NewSnapshotsClientWithBaseURI(baseURI string, subscriptionID string) SnapshotsClient { + return SnapshotsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates a snapshot. +// +// resourceGroupName is the name of the resource group. snapshotName is the name of the snapshot that is being created. +// The name can't be changed after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9 and _. +// The max name length is 80 characters. snapshot is snapshot object supplied in the body of the Put disk operation. +func (client SnapshotsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, snapshotName string, snapshot Snapshot) (result SnapshotsCreateOrUpdateFuture, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: snapshot, + Constraints: []validation.Constraint{{Target: "snapshot.DiskProperties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "snapshot.DiskProperties.CreationData", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "snapshot.DiskProperties.CreationData.ImageReference", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "snapshot.DiskProperties.CreationData.ImageReference.ID", Name: validation.Null, Rule: true, Chain: nil}}}, + }}, + {Target: "snapshot.DiskProperties.EncryptionSettings", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "snapshot.DiskProperties.EncryptionSettings.DiskEncryptionKey", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "snapshot.DiskProperties.EncryptionSettings.DiskEncryptionKey.SourceVault", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "snapshot.DiskProperties.EncryptionSettings.DiskEncryptionKey.SecretURL", Name: validation.Null, Rule: true, Chain: nil}, + }}, + {Target: "snapshot.DiskProperties.EncryptionSettings.KeyEncryptionKey", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "snapshot.DiskProperties.EncryptionSettings.KeyEncryptionKey.SourceVault", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "snapshot.DiskProperties.EncryptionSettings.KeyEncryptionKey.KeyURL", Name: validation.Null, Rule: true, Chain: nil}, + }}, + }}, + }}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "compute.SnapshotsClient", "CreateOrUpdate") + } + + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, snapshotName, snapshot) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result, err = 
client.CreateOrUpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "CreateOrUpdate", result.Response(), "Failure sending request") + return + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client SnapshotsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, snapshotName string, snapshot Snapshot) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "snapshotName": autorest.Encode("path", snapshotName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}", pathParameters), + autorest.WithJSON(snapshot), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client SnapshotsClient) CreateOrUpdateSender(req *http.Request) (future SnapshotsCreateOrUpdateFuture, err error) { + sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client)) + future.Future = azure.NewFuture(req) + future.req = req + _, err = future.Done(sender) + if err != nil { + return + } + err = autorest.Respond(future.Response(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) + return +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client SnapshotsClient) CreateOrUpdateResponder(resp *http.Response) (result Snapshot, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a snapshot. +// +// resourceGroupName is the name of the resource group. snapshotName is the name of the snapshot that is being created. +// The name can't be changed after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9 and _. +// The max name length is 80 characters. +func (client SnapshotsClient) Delete(ctx context.Context, resourceGroupName string, snapshotName string) (result SnapshotsDeleteFuture, err error) { + req, err := client.DeletePreparer(ctx, resourceGroupName, snapshotName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. 
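// Illustrative usage sketch (editor's addition): create a snapshot by copying
// an existing managed disk, then poll the returned future to completion.
// The location, the Copy DiskCreateOption constant, the autorest/to helpers,
// and the "time" import are assumptions for illustration; CreationData is
// required by the validation in CreateOrUpdate above.
func snapshotManagedDisk(ctx context.Context, client SnapshotsClient, resourceGroupName, snapshotName, sourceDiskID string) (Snapshot, error) {
	future, err := client.CreateOrUpdate(ctx, resourceGroupName, snapshotName, Snapshot{
		Location: to.StringPtr("westus"),
		DiskProperties: &DiskProperties{
			CreationData: &CreationData{
				CreateOption:     Copy,
				SourceResourceID: to.StringPtr(sourceDiskID),
			},
		},
	})
	if err != nil {
		return Snapshot{}, err
	}
	for {
		done, err := future.Done(client)
		if err != nil {
			return Snapshot{}, err
		}
		if done {
			break
		}
		time.Sleep(5 * time.Second)
	}
	return future.Result(client)
}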
+func (client SnapshotsClient) DeletePreparer(ctx context.Context, resourceGroupName string, snapshotName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "snapshotName": autorest.Encode("path", snapshotName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client SnapshotsClient) DeleteSender(req *http.Request) (future SnapshotsDeleteFuture, err error) { + sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client)) + future.Future = azure.NewFuture(req) + future.req = req + _, err = future.Done(sender) + if err != nil { + return + } + err = autorest.Respond(future.Response(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent)) + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client SnapshotsClient) DeleteResponder(resp *http.Response) (result OperationStatusResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Get gets information about a snapshot. +// +// resourceGroupName is the name of the resource group. snapshotName is the name of the snapshot that is being created. +// The name can't be changed after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9 and _. +// The max name length is 80 characters. +func (client SnapshotsClient) Get(ctx context.Context, resourceGroupName string, snapshotName string) (result Snapshot, err error) { + req, err := client.GetPreparer(ctx, resourceGroupName, snapshotName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client SnapshotsClient) GetPreparer(ctx context.Context, resourceGroupName string, snapshotName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "snapshotName": autorest.Encode("path", snapshotName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client SnapshotsClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client SnapshotsClient) GetResponder(resp *http.Response) (result Snapshot, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GrantAccess grants access to a snapshot. +// +// resourceGroupName is the name of the resource group. snapshotName is the name of the snapshot that is being created. +// The name can't be changed after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9 and _. +// The max name length is 80 characters. grantAccessData is access data object supplied in the body of the get snapshot +// access operation. +func (client SnapshotsClient) GrantAccess(ctx context.Context, resourceGroupName string, snapshotName string, grantAccessData GrantAccessData) (result SnapshotsGrantAccessFuture, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: grantAccessData, + Constraints: []validation.Constraint{{Target: "grantAccessData.DurationInSeconds", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "compute.SnapshotsClient", "GrantAccess") + } + + req, err := client.GrantAccessPreparer(ctx, resourceGroupName, snapshotName, grantAccessData) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "GrantAccess", nil, "Failure preparing request") + return + } + + result, err = client.GrantAccessSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "GrantAccess", result.Response(), "Failure sending request") + return + } + + return +} + +// GrantAccessPreparer prepares the GrantAccess request. 
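// Illustrative usage sketch (editor's addition): request a temporary SAS for a
// snapshot through the beginGetAccess long-running operation. The Read access
// level, the one-hour duration, and the "time" import are assumptions;
// DurationInSeconds must be set, per the validation in GrantAccess above.
func snapshotAccessURI(ctx context.Context, client SnapshotsClient, resourceGroupName, snapshotName string) (AccessURI, error) {
	future, err := client.GrantAccess(ctx, resourceGroupName, snapshotName, GrantAccessData{
		Access:            Read,
		DurationInSeconds: to.Int32Ptr(3600),
	})
	if err != nil {
		return AccessURI{}, err
	}
	for {
		done, err := future.Done(client)
		if err != nil {
			return AccessURI{}, err
		}
		if done {
			return future.Result(client)
		}
		time.Sleep(5 * time.Second)
	}
}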
+func (client SnapshotsClient) GrantAccessPreparer(ctx context.Context, resourceGroupName string, snapshotName string, grantAccessData GrantAccessData) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "snapshotName": autorest.Encode("path", snapshotName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}/beginGetAccess", pathParameters), + autorest.WithJSON(grantAccessData), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GrantAccessSender sends the GrantAccess request. The method will close the +// http.Response Body if it receives an error. +func (client SnapshotsClient) GrantAccessSender(req *http.Request) (future SnapshotsGrantAccessFuture, err error) { + sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client)) + future.Future = azure.NewFuture(req) + future.req = req + _, err = future.Done(sender) + if err != nil { + return + } + err = autorest.Respond(future.Response(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) + return +} + +// GrantAccessResponder handles the response to the GrantAccess request. The method always +// closes the http.Response Body. +func (client SnapshotsClient) GrantAccessResponder(resp *http.Response) (result AccessURI, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List lists snapshots under a subscription. +func (client SnapshotsClient) List(ctx context.Context) (result SnapshotListPage, err error) { + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.sl.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "List", resp, "Failure sending request") + return + } + + result.sl, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client SnapshotsClient) ListPreparer(ctx context.Context) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/snapshots", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client SnapshotsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client SnapshotsClient) ListResponder(resp *http.Response) (result SnapshotList, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. +func (client SnapshotsClient) listNextResults(lastResults SnapshotList) (result SnapshotList, err error) { + req, err := lastResults.snapshotListPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.SnapshotsClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.SnapshotsClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client SnapshotsClient) ListComplete(ctx context.Context) (result SnapshotListIterator, err error) { + result.page, err = client.List(ctx) + return +} + +// ListByResourceGroup lists snapshots under a resource group. +// +// resourceGroupName is the name of the resource group. 
+func (client SnapshotsClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result SnapshotListPage, err error) { + result.fn = client.listByResourceGroupNextResults + req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "ListByResourceGroup", nil, "Failure preparing request") + return + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.sl.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "ListByResourceGroup", resp, "Failure sending request") + return + } + + result.sl, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "ListByResourceGroup", resp, "Failure responding to request") + } + + return +} + +// ListByResourceGroupPreparer prepares the ListByResourceGroup request. +func (client SnapshotsClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the +// http.Response Body if it receives an error. +func (client SnapshotsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always +// closes the http.Response Body. +func (client SnapshotsClient) ListByResourceGroupResponder(resp *http.Response) (result SnapshotList, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listByResourceGroupNextResults retrieves the next set of results, if any. 
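// Illustrative usage sketch (editor's addition): collect the names of all
// snapshots in a resource group with ListByResourceGroupComplete (defined
// below), letting the iterator handle paging. The SnapshotListIterator follows
// the same NotDone/Value/Next contract as the other generated iterators.
func snapshotNames(ctx context.Context, client SnapshotsClient, resourceGroupName string) ([]string, error) {
	iter, err := client.ListByResourceGroupComplete(ctx, resourceGroupName)
	if err != nil {
		return nil, err
	}
	names := []string{}
	for iter.NotDone() {
		if s := iter.Value(); s.Name != nil {
			names = append(names, *s.Name)
		}
		if err := iter.Next(); err != nil {
			return names, err
		}
	}
	return names, nil
}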
+func (client SnapshotsClient) listByResourceGroupNextResults(lastResults SnapshotList) (result SnapshotList, err error) { + req, err := lastResults.snapshotListPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.SnapshotsClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.SnapshotsClient", "listByResourceGroupNextResults", resp, "Failure sending next results request") + } + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required. +func (client SnapshotsClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string) (result SnapshotListIterator, err error) { + result.page, err = client.ListByResourceGroup(ctx, resourceGroupName) + return +} + +// RevokeAccess revokes access to a snapshot. +// +// resourceGroupName is the name of the resource group. snapshotName is the name of the snapshot that is being created. +// The name can't be changed after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9 and _. +// The max name length is 80 characters. +func (client SnapshotsClient) RevokeAccess(ctx context.Context, resourceGroupName string, snapshotName string) (result SnapshotsRevokeAccessFuture, err error) { + req, err := client.RevokeAccessPreparer(ctx, resourceGroupName, snapshotName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "RevokeAccess", nil, "Failure preparing request") + return + } + + result, err = client.RevokeAccessSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "RevokeAccess", result.Response(), "Failure sending request") + return + } + + return +} + +// RevokeAccessPreparer prepares the RevokeAccess request. +func (client SnapshotsClient) RevokeAccessPreparer(ctx context.Context, resourceGroupName string, snapshotName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "snapshotName": autorest.Encode("path", snapshotName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-03-30" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}/endGetAccess", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// RevokeAccessSender sends the RevokeAccess request. The method will close the +// http.Response Body if it receives an error. 
+func (client SnapshotsClient) RevokeAccessSender(req *http.Request) (future SnapshotsRevokeAccessFuture, err error) {
+	sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
+	future.Future = azure.NewFuture(req)
+	future.req = req
+	_, err = future.Done(sender)
+	if err != nil {
+		return
+	}
+	err = autorest.Respond(future.Response(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
+	return
+}
+
+// RevokeAccessResponder handles the response to the RevokeAccess request. The method always
+// closes the http.Response Body.
+func (client SnapshotsClient) RevokeAccessResponder(resp *http.Response) (result OperationStatusResponse, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// Update updates (patches) a snapshot.
+//
+// resourceGroupName is the name of the resource group. snapshotName is the name of the snapshot that is being created.
+// The name can't be changed after the snapshot is created. Supported characters for the name are a-z, A-Z, 0-9 and _.
+// The max name length is 80 characters. snapshot is the snapshot object supplied in the body of the Patch snapshot
+// operation.
+func (client SnapshotsClient) Update(ctx context.Context, resourceGroupName string, snapshotName string, snapshot SnapshotUpdate) (result SnapshotsUpdateFuture, err error) {
+	req, err := client.UpdatePreparer(ctx, resourceGroupName, snapshotName, snapshot)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "Update", nil, "Failure preparing request")
+		return
+	}
+
+	result, err = client.UpdateSender(req)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "compute.SnapshotsClient", "Update", result.Response(), "Failure sending request")
+		return
+	}
+
+	return
+}
+
+// UpdatePreparer prepares the Update request.
+func (client SnapshotsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, snapshotName string, snapshot SnapshotUpdate) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"snapshotName":      autorest.Encode("path", snapshotName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+	}
+
+	const APIVersion = "2017-03-30"
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsJSON(),
+		autorest.AsPatch(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}", pathParameters),
+		autorest.WithJSON(snapshot),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// UpdateSender sends the Update request. The method will close the
+// http.Response Body if it receives an error.
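As with RevokeAccessSender above, the UpdateSender below wraps the PATCH in an azure.Future, so Update hands back a SnapshotsUpdateFuture instead of the finished Snapshot. A sketch of initiating the operation, assuming an already-configured SnapshotsClient; the names are illustrative and polling the future to completion is elided:

```go
package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
)

// patchSnapshot kicks off a PATCH on an existing snapshot; the returned
// future tracks the long-running operation on the service side.
func patchSnapshot(ctx context.Context, client compute.SnapshotsClient) error {
	var patch compute.SnapshotUpdate // desired changes elided for brevity
	future, err := client.Update(ctx, "my-rg", "my-snapshot", patch)
	if err != nil {
		return err
	}
	if resp := future.Response(); resp != nil {
		fmt.Println(resp.Status) // e.g. 202 Accepted while the operation runs
	}
	return nil
}
```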
+func (client SnapshotsClient) UpdateSender(req *http.Request) (future SnapshotsUpdateFuture, err error) { + sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client)) + future.Future = azure.NewFuture(req) + future.req = req + _, err = future.Done(sender) + if err != nil { + return + } + err = autorest.Respond(future.Response(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) + return +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. +func (client SnapshotsClient) UpdateResponder(resp *http.Response) (result Snapshot, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/usage.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/usage.go new file mode 100644 index 000000000000..3a316670fda0 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/usage.go @@ -0,0 +1,141 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "net/http" +) + +// UsageClient is the compute Client +type UsageClient struct { + BaseClient +} + +// NewUsageClient creates an instance of the UsageClient client. +func NewUsageClient(subscriptionID string) UsageClient { + return NewUsageClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewUsageClientWithBaseURI creates an instance of the UsageClient client. +func NewUsageClientWithBaseURI(baseURI string, subscriptionID string) UsageClient { + return UsageClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// List gets, for the specified location, the current compute resource usage information as well as the limits for +// compute resources under the subscription. +// +// location is the location for which resource usage is queried. 
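UsageClient reports per-region quota consumption against subscription limits. A slightly fuller sketch, from service-principal credentials through the paged listing; the tenant/application/secret/subscription values are placeholders, and the Name/CurrentValue/Limit fields follow this package's Usage model:

```go
package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/adal"
	"github.com/Azure/go-autorest/autorest/azure"
)

func main() {
	tenantID, appID, secret := "my-tenant", "my-app", "my-secret" // placeholders
	subscriptionID := "my-subscription"                           // placeholder

	oauthCfg, err := adal.NewOAuthConfig(azure.PublicCloud.ActiveDirectoryEndpoint, tenantID)
	if err != nil {
		panic(err)
	}
	token, err := adal.NewServicePrincipalToken(*oauthCfg, appID, secret,
		azure.PublicCloud.ResourceManagerEndpoint)
	if err != nil {
		panic(err)
	}

	client := compute.NewUsageClient(subscriptionID)
	client.Authorizer = autorest.NewBearerAuthorizer(token)

	// Walk every usage entry for one region, crossing page boundaries.
	iter, err := client.ListComplete(context.Background(), "westus2")
	if err != nil {
		panic(err)
	}
	for iter.NotDone() {
		u := iter.Value()
		if u.Name != nil && u.Name.Value != nil && u.CurrentValue != nil && u.Limit != nil {
			fmt.Printf("%s: %d of %d\n", *u.Name.Value, *u.CurrentValue, *u.Limit)
		}
		if err := iter.Next(); err != nil {
			panic(err)
		}
	}
}
```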
+func (client UsageClient) List(ctx context.Context, location string) (result ListUsagesResultPage, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: location, + Constraints: []validation.Constraint{{Target: "location", Name: validation.Pattern, Rule: `^[-\w\._]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "compute.UsageClient", "List") + } + + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, location) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.UsageClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.lur.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.UsageClient", "List", resp, "Failure sending request") + return + } + + result.lur, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.UsageClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client UsageClient) ListPreparer(ctx context.Context, location string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": autorest.Encode("path", location), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/usages", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client UsageClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client UsageClient) ListResponder(resp *http.Response) (result ListUsagesResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. 
+func (client UsageClient) listNextResults(lastResults ListUsagesResult) (result ListUsagesResult, err error) { + req, err := lastResults.listUsagesResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.UsageClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.UsageClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.UsageClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client UsageClient) ListComplete(ctx context.Context, location string) (result ListUsagesResultIterator, err error) { + result.page, err = client.List(ctx, location) + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/version.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/version.go new file mode 100644 index 000000000000..00a3106b8812 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/version.go @@ -0,0 +1,28 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +// UserAgent returns the UserAgent string to use when sending http.Requests. +func UserAgent() string { + return "Azure-SDK-For-Go/v12.4.0-beta arm-compute/" +} + +// Version returns the semantic version (see http://semver.org) of the client. +func Version() string { + return "v12.4.0-beta" +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachineextensionimages.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachineextensionimages.go new file mode 100644 index 000000000000..1573aa734dd8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachineextensionimages.go @@ -0,0 +1,251 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// VirtualMachineExtensionImagesClient is the compute Client +type VirtualMachineExtensionImagesClient struct { + BaseClient +} + +// NewVirtualMachineExtensionImagesClient creates an instance of the VirtualMachineExtensionImagesClient client. +func NewVirtualMachineExtensionImagesClient(subscriptionID string) VirtualMachineExtensionImagesClient { + return NewVirtualMachineExtensionImagesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewVirtualMachineExtensionImagesClientWithBaseURI creates an instance of the VirtualMachineExtensionImagesClient +// client. +func NewVirtualMachineExtensionImagesClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineExtensionImagesClient { + return VirtualMachineExtensionImagesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Get gets a virtual machine extension image. +// +// location is the name of a supported Azure region. +func (client VirtualMachineExtensionImagesClient) Get(ctx context.Context, location string, publisherName string, typeParameter string, version string) (result VirtualMachineExtensionImage, err error) { + req, err := client.GetPreparer(ctx, location, publisherName, typeParameter, version) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionImagesClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionImagesClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionImagesClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client VirtualMachineExtensionImagesClient) GetPreparer(ctx context.Context, location string, publisherName string, typeParameter string, version string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": autorest.Encode("path", location), + "publisherName": autorest.Encode("path", publisherName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "type": autorest.Encode("path", typeParameter), + "version": autorest.Encode("path", version), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types/{type}/versions/{version}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineExtensionImagesClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client VirtualMachineExtensionImagesClient) GetResponder(resp *http.Response) (result VirtualMachineExtensionImage, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListTypes gets a list of virtual machine extension image types. +// +// location is the name of a supported Azure region. +func (client VirtualMachineExtensionImagesClient) ListTypes(ctx context.Context, location string, publisherName string) (result ListVirtualMachineExtensionImage, err error) { + req, err := client.ListTypesPreparer(ctx, location, publisherName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionImagesClient", "ListTypes", nil, "Failure preparing request") + return + } + + resp, err := client.ListTypesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionImagesClient", "ListTypes", resp, "Failure sending request") + return + } + + result, err = client.ListTypesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionImagesClient", "ListTypes", resp, "Failure responding to request") + } + + return +} + +// ListTypesPreparer prepares the ListTypes request. 
+func (client VirtualMachineExtensionImagesClient) ListTypesPreparer(ctx context.Context, location string, publisherName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": autorest.Encode("path", location), + "publisherName": autorest.Encode("path", publisherName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListTypesSender sends the ListTypes request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineExtensionImagesClient) ListTypesSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListTypesResponder handles the response to the ListTypes request. The method always +// closes the http.Response Body. +func (client VirtualMachineExtensionImagesClient) ListTypesResponder(resp *http.Response) (result ListVirtualMachineExtensionImage, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Value), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListVersions gets a list of virtual machine extension image versions. +// +// location is the name of a supported Azure region. filter is the filter to apply on the operation. +func (client VirtualMachineExtensionImagesClient) ListVersions(ctx context.Context, location string, publisherName string, typeParameter string, filter string, top *int32, orderby string) (result ListVirtualMachineExtensionImage, err error) { + req, err := client.ListVersionsPreparer(ctx, location, publisherName, typeParameter, filter, top, orderby) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionImagesClient", "ListVersions", nil, "Failure preparing request") + return + } + + resp, err := client.ListVersionsSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionImagesClient", "ListVersions", resp, "Failure sending request") + return + } + + result, err = client.ListVersionsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionImagesClient", "ListVersions", resp, "Failure responding to request") + } + + return +} + +// ListVersionsPreparer prepares the ListVersions request. 
+func (client VirtualMachineExtensionImagesClient) ListVersionsPreparer(ctx context.Context, location string, publisherName string, typeParameter string, filter string, top *int32, orderby string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": autorest.Encode("path", location), + "publisherName": autorest.Encode("path", publisherName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "type": autorest.Encode("path", typeParameter), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + if len(orderby) > 0 { + queryParameters["$orderby"] = autorest.Encode("query", orderby) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types/{type}/versions", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListVersionsSender sends the ListVersions request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineExtensionImagesClient) ListVersionsSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListVersionsResponder handles the response to the ListVersions request. The method always +// closes the http.Response Body. +func (client VirtualMachineExtensionImagesClient) ListVersionsResponder(resp *http.Response) (result ListVirtualMachineExtensionImage, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Value), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachineextensions.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachineextensions.go new file mode 100644 index 000000000000..d6d9066ccfa4 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachineextensions.go @@ -0,0 +1,255 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+
+import (
+	"context"
+	"github.com/Azure/go-autorest/autorest"
+	"github.com/Azure/go-autorest/autorest/azure"
+	"net/http"
+)
+
+// VirtualMachineExtensionsClient is the compute Client
+type VirtualMachineExtensionsClient struct {
+	BaseClient
+}
+
+// NewVirtualMachineExtensionsClient creates an instance of the VirtualMachineExtensionsClient client.
+func NewVirtualMachineExtensionsClient(subscriptionID string) VirtualMachineExtensionsClient {
+	return NewVirtualMachineExtensionsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewVirtualMachineExtensionsClientWithBaseURI creates an instance of the VirtualMachineExtensionsClient client.
+func NewVirtualMachineExtensionsClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineExtensionsClient {
+	return VirtualMachineExtensionsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate the operation to create or update the extension.
+//
+// resourceGroupName is the name of the resource group. VMName is the name of the virtual machine where the extension
+// should be created or updated. VMExtensionName is the name of the virtual machine extension. extensionParameters is
+// parameters supplied to the Create Virtual Machine Extension operation.
+func (client VirtualMachineExtensionsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, VMName string, VMExtensionName string, extensionParameters VirtualMachineExtension) (result VirtualMachineExtensionsCreateOrUpdateFuture, err error) {
+	req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, VMName, VMExtensionName, extensionParameters)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "CreateOrUpdate", nil, "Failure preparing request")
+		return
+	}
+
+	result, err = client.CreateOrUpdateSender(req)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "CreateOrUpdate", result.Response(), "Failure sending request")
+		return
+	}
+
+	return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client VirtualMachineExtensionsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, VMName string, VMExtensionName string, extensionParameters VirtualMachineExtension) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+		"vmExtensionName":   autorest.Encode("path", VMExtensionName),
+		"vmName":            autorest.Encode("path", VMName),
+	}
+
+	const APIVersion = "2017-12-01"
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsJSON(),
+		autorest.AsPut(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}", pathParameters),
+		autorest.WithJSON(extensionParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
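CreateOrUpdate is another future-returning call: the CreateOrUpdateSender below issues the PUT and hands back a VirtualMachineExtensionsCreateOrUpdateFuture. A sketch of starting an extension install, assuming a configured client; the resource names are illustrative and the extension's publisher/type/settings are elided:

```go
package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
)

// installExtension starts creating (or updating) an extension on a VM.
func installExtension(ctx context.Context, client compute.VirtualMachineExtensionsClient) error {
	var ext compute.VirtualMachineExtension // publisher, type, settings elided
	future, err := client.CreateOrUpdate(ctx, "my-rg", "my-vm", "my-extension", ext)
	if err != nil {
		return err
	}
	if resp := future.Response(); resp != nil {
		fmt.Println(resp.Status) // 200/201 once the platform accepts the PUT
	}
	return nil
}
```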
+func (client VirtualMachineExtensionsClient) CreateOrUpdateSender(req *http.Request) (future VirtualMachineExtensionsCreateOrUpdateFuture, err error) { + sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client)) + future.Future = azure.NewFuture(req) + future.req = req + _, err = future.Done(sender) + if err != nil { + return + } + err = autorest.Respond(future.Response(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated)) + return +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client VirtualMachineExtensionsClient) CreateOrUpdateResponder(resp *http.Response) (result VirtualMachineExtension, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete the operation to delete the extension. +// +// resourceGroupName is the name of the resource group. VMName is the name of the virtual machine where the extension +// should be deleted. VMExtensionName is the name of the virtual machine extension. +func (client VirtualMachineExtensionsClient) Delete(ctx context.Context, resourceGroupName string, VMName string, VMExtensionName string) (result VirtualMachineExtensionsDeleteFuture, err error) { + req, err := client.DeletePreparer(ctx, resourceGroupName, VMName, VMExtensionName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client VirtualMachineExtensionsClient) DeletePreparer(ctx context.Context, resourceGroupName string, VMName string, VMExtensionName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmExtensionName": autorest.Encode("path", VMExtensionName), + "vmName": autorest.Encode("path", VMName), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. 
+func (client VirtualMachineExtensionsClient) DeleteSender(req *http.Request) (future VirtualMachineExtensionsDeleteFuture, err error) { + sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client)) + future.Future = azure.NewFuture(req) + future.req = req + _, err = future.Done(sender) + if err != nil { + return + } + err = autorest.Respond(future.Response(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent)) + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client VirtualMachineExtensionsClient) DeleteResponder(resp *http.Response) (result OperationStatusResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Get the operation to get the extension. +// +// resourceGroupName is the name of the resource group. VMName is the name of the virtual machine containing the +// extension. VMExtensionName is the name of the virtual machine extension. expand is the expand expression to apply on +// the operation. +func (client VirtualMachineExtensionsClient) Get(ctx context.Context, resourceGroupName string, VMName string, VMExtensionName string, expand string) (result VirtualMachineExtension, err error) { + req, err := client.GetPreparer(ctx, resourceGroupName, VMName, VMExtensionName, expand) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineExtensionsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client VirtualMachineExtensionsClient) GetPreparer(ctx context.Context, resourceGroupName string, VMName string, VMExtensionName string, expand string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmExtensionName": autorest.Encode("path", VMExtensionName), + "vmName": autorest.Encode("path", VMName), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/extensions/{vmExtensionName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. 
+func (client VirtualMachineExtensionsClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client VirtualMachineExtensionsClient) GetResponder(resp *http.Response) (result VirtualMachineExtension, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachineimages.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachineimages.go new file mode 100644 index 000000000000..e3aa2bc8224e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachineimages.go @@ -0,0 +1,387 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// VirtualMachineImagesClient is the compute Client +type VirtualMachineImagesClient struct { + BaseClient +} + +// NewVirtualMachineImagesClient creates an instance of the VirtualMachineImagesClient client. +func NewVirtualMachineImagesClient(subscriptionID string) VirtualMachineImagesClient { + return NewVirtualMachineImagesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewVirtualMachineImagesClientWithBaseURI creates an instance of the VirtualMachineImagesClient client. +func NewVirtualMachineImagesClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineImagesClient { + return VirtualMachineImagesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Get gets a virtual machine image. +// +// location is the name of a supported Azure region. publisherName is a valid image publisher. offer is a valid image +// publisher offer. skus is a valid image SKU. version is a valid image SKU version. 
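The four marketplace coordinates in the comment above map directly onto Get's parameters. A sketch resolving one concrete image, assuming a configured client; the Canonical/UbuntuServer coordinates and the version string are illustrative values:

```go
package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
)

// resolveImage fetches a single image version by its marketplace
// coordinates: location, publisher, offer, SKU, and version.
func resolveImage(ctx context.Context, client compute.VirtualMachineImagesClient) error {
	img, err := client.Get(ctx, "westus2",
		"Canonical", "UbuntuServer", "16.04-LTS", "16.04.201803090")
	if err != nil {
		return err
	}
	if img.Name != nil {
		fmt.Println(*img.Name) // the version string doubles as the resource name
	}
	return nil
}
```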
+func (client VirtualMachineImagesClient) Get(ctx context.Context, location string, publisherName string, offer string, skus string, version string) (result VirtualMachineImage, err error) { + req, err := client.GetPreparer(ctx, location, publisherName, offer, skus, version) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client VirtualMachineImagesClient) GetPreparer(ctx context.Context, location string, publisherName string, offer string, skus string, version string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": autorest.Encode("path", location), + "offer": autorest.Encode("path", offer), + "publisherName": autorest.Encode("path", publisherName), + "skus": autorest.Encode("path", skus), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "version": autorest.Encode("path", version), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus/{skus}/versions/{version}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineImagesClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client VirtualMachineImagesClient) GetResponder(resp *http.Response) (result VirtualMachineImage, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets a list of all virtual machine image versions for the specified location, publisher, offer, and SKU. +// +// location is the name of a supported Azure region. publisherName is a valid image publisher. offer is a valid image +// publisher offer. skus is a valid image SKU. filter is the filter to apply on the operation. 
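Unlike the snapshot listings, this List is unpaged: the whole version array arrives in one ListVirtualMachineImageResource, and filter/top/orderby are forwarded as the $filter/$top/$orderby OData query parameters (see ListPreparer below). A sketch capping the call at the five newest versions; the orderby expression is an illustrative value:

```go
package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
)

// latestVersions asks the service for at most five versions, newest first.
func latestVersions(ctx context.Context, client compute.VirtualMachineImagesClient) error {
	top := int32(5)
	res, err := client.List(ctx, "westus2",
		"Canonical", "UbuntuServer", "16.04-LTS", "", &top, "name desc")
	if err != nil {
		return err
	}
	if res.Value == nil {
		return nil
	}
	for _, v := range *res.Value {
		if v.Name != nil {
			fmt.Println(*v.Name)
		}
	}
	return nil
}
```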
+func (client VirtualMachineImagesClient) List(ctx context.Context, location string, publisherName string, offer string, skus string, filter string, top *int32, orderby string) (result ListVirtualMachineImageResource, err error) { + req, err := client.ListPreparer(ctx, location, publisherName, offer, skus, filter, top, orderby) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "List", resp, "Failure sending request") + return + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client VirtualMachineImagesClient) ListPreparer(ctx context.Context, location string, publisherName string, offer string, skus string, filter string, top *int32, orderby string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": autorest.Encode("path", location), + "offer": autorest.Encode("path", offer), + "publisherName": autorest.Encode("path", publisherName), + "skus": autorest.Encode("path", skus), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + if len(orderby) > 0 { + queryParameters["$orderby"] = autorest.Encode("query", orderby) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus/{skus}/versions", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineImagesClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client VirtualMachineImagesClient) ListResponder(resp *http.Response) (result ListVirtualMachineImageResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Value), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListOffers gets a list of virtual machine image offers for the specified location and publisher. +// +// location is the name of a supported Azure region. publisherName is a valid image publisher. 
+func (client VirtualMachineImagesClient) ListOffers(ctx context.Context, location string, publisherName string) (result ListVirtualMachineImageResource, err error) { + req, err := client.ListOffersPreparer(ctx, location, publisherName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "ListOffers", nil, "Failure preparing request") + return + } + + resp, err := client.ListOffersSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "ListOffers", resp, "Failure sending request") + return + } + + result, err = client.ListOffersResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "ListOffers", resp, "Failure responding to request") + } + + return +} + +// ListOffersPreparer prepares the ListOffers request. +func (client VirtualMachineImagesClient) ListOffersPreparer(ctx context.Context, location string, publisherName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": autorest.Encode("path", location), + "publisherName": autorest.Encode("path", publisherName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListOffersSender sends the ListOffers request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineImagesClient) ListOffersSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListOffersResponder handles the response to the ListOffers request. The method always +// closes the http.Response Body. +func (client VirtualMachineImagesClient) ListOffersResponder(resp *http.Response) (result ListVirtualMachineImageResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Value), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListPublishers gets a list of virtual machine image publishers for the specified Azure location. +// +// location is the name of a supported Azure region. 
+func (client VirtualMachineImagesClient) ListPublishers(ctx context.Context, location string) (result ListVirtualMachineImageResource, err error) { + req, err := client.ListPublishersPreparer(ctx, location) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "ListPublishers", nil, "Failure preparing request") + return + } + + resp, err := client.ListPublishersSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "ListPublishers", resp, "Failure sending request") + return + } + + result, err = client.ListPublishersResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "ListPublishers", resp, "Failure responding to request") + } + + return +} + +// ListPublishersPreparer prepares the ListPublishers request. +func (client VirtualMachineImagesClient) ListPublishersPreparer(ctx context.Context, location string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": autorest.Encode("path", location), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListPublishersSender sends the ListPublishers request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineImagesClient) ListPublishersSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListPublishersResponder handles the response to the ListPublishers request. The method always +// closes the http.Response Body. +func (client VirtualMachineImagesClient) ListPublishersResponder(resp *http.Response) (result ListVirtualMachineImageResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Value), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListSkus gets a list of virtual machine image SKUs for the specified location, publisher, and offer. +// +// location is the name of a supported Azure region. publisherName is a valid image publisher. offer is a valid image +// publisher offer. 
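ListPublishers, ListOffers, and ListSkus form a natural drill-down for discovering what a region can deploy. A sketch chaining the three, assuming a configured client and that Name is always populated (it is a required field on VirtualMachineImageResource in this package's models):

```go
package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
)

// walkCatalog prints every publisher/offer/SKU triple in one region.
func walkCatalog(ctx context.Context, client compute.VirtualMachineImagesClient) error {
	pubs, err := client.ListPublishers(ctx, "westus2")
	if err != nil || pubs.Value == nil {
		return err
	}
	for _, p := range *pubs.Value {
		offers, err := client.ListOffers(ctx, "westus2", *p.Name)
		if err != nil || offers.Value == nil {
			return err
		}
		for _, o := range *offers.Value {
			skus, err := client.ListSkus(ctx, "westus2", *p.Name, *o.Name)
			if err != nil || skus.Value == nil {
				return err
			}
			for _, s := range *skus.Value {
				fmt.Printf("%s / %s / %s\n", *p.Name, *o.Name, *s.Name)
			}
		}
	}
	return nil
}
```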
+func (client VirtualMachineImagesClient) ListSkus(ctx context.Context, location string, publisherName string, offer string) (result ListVirtualMachineImageResource, err error) { + req, err := client.ListSkusPreparer(ctx, location, publisherName, offer) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "ListSkus", nil, "Failure preparing request") + return + } + + resp, err := client.ListSkusSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "ListSkus", resp, "Failure sending request") + return + } + + result, err = client.ListSkusResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineImagesClient", "ListSkus", resp, "Failure responding to request") + } + + return +} + +// ListSkusPreparer prepares the ListSkus request. +func (client VirtualMachineImagesClient) ListSkusPreparer(ctx context.Context, location string, publisherName string, offer string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": autorest.Encode("path", location), + "offer": autorest.Encode("path", offer), + "publisherName": autorest.Encode("path", publisherName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSkusSender sends the ListSkus request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineImagesClient) ListSkusSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListSkusResponder handles the response to the ListSkus request. The method always +// closes the http.Response Body. +func (client VirtualMachineImagesClient) ListSkusResponder(resp *http.Response) (result ListVirtualMachineImageResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result.Value), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachineruncommands.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachineruncommands.go new file mode 100644 index 000000000000..79674138d2cb --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachineruncommands.go @@ -0,0 +1,212 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+	"context"
+	"github.com/Azure/go-autorest/autorest"
+	"github.com/Azure/go-autorest/autorest/azure"
+	"github.com/Azure/go-autorest/autorest/validation"
+	"net/http"
+)
+
+// VirtualMachineRunCommandsClient is the compute Client
+type VirtualMachineRunCommandsClient struct {
+	BaseClient
+}
+
+// NewVirtualMachineRunCommandsClient creates an instance of the VirtualMachineRunCommandsClient client.
+func NewVirtualMachineRunCommandsClient(subscriptionID string) VirtualMachineRunCommandsClient {
+	return NewVirtualMachineRunCommandsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewVirtualMachineRunCommandsClientWithBaseURI creates an instance of the VirtualMachineRunCommandsClient client.
+func NewVirtualMachineRunCommandsClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineRunCommandsClient {
+	return VirtualMachineRunCommandsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// Get gets a specific run command for a subscription in a location.
+//
+// location is the location upon which run commands are queried. commandID is the command ID.
+func (client VirtualMachineRunCommandsClient) Get(ctx context.Context, location string, commandID string) (result RunCommandDocument, err error) {
+	if err := validation.Validate([]validation.Validation{
+		{TargetValue: location,
+			Constraints: []validation.Constraint{{Target: "location", Name: validation.Pattern, Rule: `^[-\w\._]+$`, Chain: nil}}}}); err != nil {
+		return result, validation.NewErrorWithValidationError(err, "compute.VirtualMachineRunCommandsClient", "Get")
+	}
+
+	req, err := client.GetPreparer(ctx, location, commandID)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "compute.VirtualMachineRunCommandsClient", "Get", nil, "Failure preparing request")
+		return
+	}
+
+	resp, err := client.GetSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		err = autorest.NewErrorWithError(err, "compute.VirtualMachineRunCommandsClient", "Get", resp, "Failure sending request")
+		return
+	}
+
+	result, err = client.GetResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "compute.VirtualMachineRunCommandsClient", "Get", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// GetPreparer prepares the Get request.
+func (client VirtualMachineRunCommandsClient) GetPreparer(ctx context.Context, location string, commandID string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "commandId": autorest.Encode("path", commandID), + "location": autorest.Encode("path", location), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/runCommands/{commandId}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineRunCommandsClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client VirtualMachineRunCommandsClient) GetResponder(resp *http.Response) (result RunCommandDocument, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List lists all available run commands for a subscription in a location. +// +// location is the location upon which run commands is queried. +func (client VirtualMachineRunCommandsClient) List(ctx context.Context, location string) (result RunCommandListResultPage, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: location, + Constraints: []validation.Constraint{{Target: "location", Name: validation.Pattern, Rule: `^[-\w\._]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "compute.VirtualMachineRunCommandsClient", "List") + } + + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, location) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineRunCommandsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.rclr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachineRunCommandsClient", "List", resp, "Failure sending request") + return + } + + result.rclr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineRunCommandsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
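Note that List returns a `RunCommandListResultPage` rather than a plain slice. A page-walking sketch, assuming a client authorized as in the previous snippet and a placeholder location:

```go
package sketch

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
)

// printRunCommands walks every page of the paged List result; Next fetches
// follow-up pages via the nextLink until none remain.
func printRunCommands(ctx context.Context, client compute.VirtualMachineRunCommandsClient, location string) error {
	page, err := client.List(ctx, location)
	for err == nil && page.NotDone() {
		for _, doc := range page.Values() {
			if doc.ID != nil && doc.Label != nil {
				fmt.Printf("%s: %s\n", *doc.ID, *doc.Label)
			}
		}
		err = page.Next()
	}
	return err
}
```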
+func (client VirtualMachineRunCommandsClient) ListPreparer(ctx context.Context, location string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": autorest.Encode("path", location), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/runCommands", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineRunCommandsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client VirtualMachineRunCommandsClient) ListResponder(resp *http.Response) (result RunCommandListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. +func (client VirtualMachineRunCommandsClient) listNextResults(lastResults RunCommandListResult) (result RunCommandListResult, err error) { + req, err := lastResults.runCommandListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineRunCommandsClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineRunCommandsClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineRunCommandsClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client VirtualMachineRunCommandsClient) ListComplete(ctx context.Context, location string) (result RunCommandListResultIterator, err error) { + result.page, err = client.List(ctx, location) + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachines.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachines.go new file mode 100644 index 000000000000..8e61949a5240 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachines.go @@ -0,0 +1,1290 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "net/http" +) + +// VirtualMachinesClient is the compute Client +type VirtualMachinesClient struct { + BaseClient +} + +// NewVirtualMachinesClient creates an instance of the VirtualMachinesClient client. +func NewVirtualMachinesClient(subscriptionID string) VirtualMachinesClient { + return NewVirtualMachinesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewVirtualMachinesClientWithBaseURI creates an instance of the VirtualMachinesClient client. +func NewVirtualMachinesClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachinesClient { + return VirtualMachinesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Capture captures the VM by copying virtual hard disks of the VM and outputs a template that can be used to create +// similar VMs. +// +// resourceGroupName is the name of the resource group. VMName is the name of the virtual machine. parameters is +// parameters supplied to the Capture Virtual Machine operation. +func (client VirtualMachinesClient) Capture(ctx context.Context, resourceGroupName string, VMName string, parameters VirtualMachineCaptureParameters) (result VirtualMachinesCaptureFuture, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.VhdPrefix", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.DestinationContainerName", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.OverwriteVhds", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "compute.VirtualMachinesClient", "Capture") + } + + req, err := client.CapturePreparer(ctx, resourceGroupName, VMName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Capture", nil, "Failure preparing request") + return + } + + result, err = client.CaptureSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Capture", result.Response(), "Failure sending request") + return + } + + return +} + +// CapturePreparer prepares the Capture request. 
+func (client VirtualMachinesClient) CapturePreparer(ctx context.Context, resourceGroupName string, VMName string, parameters VirtualMachineCaptureParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmName": autorest.Encode("path", VMName), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/capture", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CaptureSender sends the Capture request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachinesClient) CaptureSender(req *http.Request) (future VirtualMachinesCaptureFuture, err error) { + sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client)) + future.Future = azure.NewFuture(req) + future.req = req + _, err = future.Done(sender) + if err != nil { + return + } + err = autorest.Respond(future.Response(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) + return +} + +// CaptureResponder handles the response to the Capture request. The method always +// closes the http.Response Body. +func (client VirtualMachinesClient) CaptureResponder(resp *http.Response) (result VirtualMachineCaptureResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ConvertToManagedDisks converts virtual machine disks from blob-based to managed disks. Virtual machine must be +// stop-deallocated before invoking this operation. +// +// resourceGroupName is the name of the resource group. VMName is the name of the virtual machine. +func (client VirtualMachinesClient) ConvertToManagedDisks(ctx context.Context, resourceGroupName string, VMName string) (result VirtualMachinesConvertToManagedDisksFuture, err error) { + req, err := client.ConvertToManagedDisksPreparer(ctx, resourceGroupName, VMName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ConvertToManagedDisks", nil, "Failure preparing request") + return + } + + result, err = client.ConvertToManagedDisksSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ConvertToManagedDisks", result.Response(), "Failure sending request") + return + } + + return +} + +// ConvertToManagedDisksPreparer prepares the ConvertToManagedDisks request. 
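Capture is the first of several long-running operations in this file whose Sender returns a typed future instead of an `*http.Response`. A blocking sketch of how callers consume any of these futures, assuming the `Future.WaitForCompletion` helper and the generated `Result` method available in this go-autorest/SDK vintage; resource names are placeholders:

```go
package sketch

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
)

// waitForCapture shows the blocking pattern for the *Future values returned
// by the senders in this file.
func waitForCapture(ctx context.Context, vmClient compute.VirtualMachinesClient, rg, vmName string,
	params compute.VirtualMachineCaptureParameters) error {
	future, err := vmClient.Capture(ctx, rg, vmName, params)
	if err != nil {
		return err
	}
	// Poll the operation until the service reports a terminal state.
	if err := future.WaitForCompletion(ctx, vmClient.Client); err != nil {
		return err
	}
	// The generated Result helper runs the responder on the final response.
	result, err := future.Result(vmClient)
	if err != nil {
		return err
	}
	fmt.Printf("capture template: %+v\n", result)
	return nil
}
```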
+func (client VirtualMachinesClient) ConvertToManagedDisksPreparer(ctx context.Context, resourceGroupName string, VMName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmName": autorest.Encode("path", VMName), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/convertToManagedDisks", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ConvertToManagedDisksSender sends the ConvertToManagedDisks request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachinesClient) ConvertToManagedDisksSender(req *http.Request) (future VirtualMachinesConvertToManagedDisksFuture, err error) { + sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client)) + future.Future = azure.NewFuture(req) + future.req = req + _, err = future.Done(sender) + if err != nil { + return + } + err = autorest.Respond(future.Response(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) + return +} + +// ConvertToManagedDisksResponder handles the response to the ConvertToManagedDisks request. The method always +// closes the http.Response Body. +func (client VirtualMachinesClient) ConvertToManagedDisksResponder(resp *http.Response) (result OperationStatusResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// CreateOrUpdate the operation to create or update a virtual machine. +// +// resourceGroupName is the name of the resource group. VMName is the name of the virtual machine. parameters is +// parameters supplied to the Create Virtual Machine operation. 
+func (client VirtualMachinesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, VMName string, parameters VirtualMachine) (result VirtualMachinesCreateOrUpdateFuture, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.VirtualMachineProperties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.VirtualMachineProperties.StorageProfile", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.VirtualMachineProperties.StorageProfile.OsDisk", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.VirtualMachineProperties.StorageProfile.OsDisk.EncryptionSettings", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.VirtualMachineProperties.StorageProfile.OsDisk.EncryptionSettings.DiskEncryptionKey", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.VirtualMachineProperties.StorageProfile.OsDisk.EncryptionSettings.DiskEncryptionKey.SecretURL", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.VirtualMachineProperties.StorageProfile.OsDisk.EncryptionSettings.DiskEncryptionKey.SourceVault", Name: validation.Null, Rule: true, Chain: nil}, + }}, + {Target: "parameters.VirtualMachineProperties.StorageProfile.OsDisk.EncryptionSettings.KeyEncryptionKey", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.VirtualMachineProperties.StorageProfile.OsDisk.EncryptionSettings.KeyEncryptionKey.KeyURL", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.VirtualMachineProperties.StorageProfile.OsDisk.EncryptionSettings.KeyEncryptionKey.SourceVault", Name: validation.Null, Rule: true, Chain: nil}, + }}, + }}, + }}, + }}, + }}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "compute.VirtualMachinesClient", "CreateOrUpdate") + } + + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, VMName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result, err = client.CreateOrUpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "CreateOrUpdate", result.Response(), "Failure sending request") + return + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client VirtualMachinesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, VMName string, parameters VirtualMachine) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmName": autorest.Encode("path", VMName), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachinesClient) CreateOrUpdateSender(req *http.Request) (future VirtualMachinesCreateOrUpdateFuture, err error) { + sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client)) + future.Future = azure.NewFuture(req) + future.req = req + _, err = future.Done(sender) + if err != nil { + return + } + err = autorest.Respond(future.Response(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated)) + return +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client VirtualMachinesClient) CreateOrUpdateResponder(resp *http.Response) (result VirtualMachine, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Deallocate shuts down the virtual machine and releases the compute resources. You are not billed for the compute +// resources that this virtual machine uses. +// +// resourceGroupName is the name of the resource group. VMName is the name of the virtual machine. +func (client VirtualMachinesClient) Deallocate(ctx context.Context, resourceGroupName string, VMName string) (result VirtualMachinesDeallocateFuture, err error) { + req, err := client.DeallocatePreparer(ctx, resourceGroupName, VMName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Deallocate", nil, "Failure preparing request") + return + } + + result, err = client.DeallocateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Deallocate", result.Response(), "Failure sending request") + return + } + + return +} + +// DeallocatePreparer prepares the Deallocate request. 
+func (client VirtualMachinesClient) DeallocatePreparer(ctx context.Context, resourceGroupName string, VMName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmName": autorest.Encode("path", VMName), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/deallocate", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeallocateSender sends the Deallocate request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachinesClient) DeallocateSender(req *http.Request) (future VirtualMachinesDeallocateFuture, err error) { + sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client)) + future.Future = azure.NewFuture(req) + future.req = req + _, err = future.Done(sender) + if err != nil { + return + } + err = autorest.Respond(future.Response(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) + return +} + +// DeallocateResponder handles the response to the Deallocate request. The method always +// closes the http.Response Body. +func (client VirtualMachinesClient) DeallocateResponder(resp *http.Response) (result OperationStatusResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete the operation to delete a virtual machine. +// +// resourceGroupName is the name of the resource group. VMName is the name of the virtual machine. +func (client VirtualMachinesClient) Delete(ctx context.Context, resourceGroupName string, VMName string) (result VirtualMachinesDeleteFuture, err error) { + req, err := client.DeletePreparer(ctx, resourceGroupName, VMName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client VirtualMachinesClient) DeletePreparer(ctx context.Context, resourceGroupName string, VMName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmName": autorest.Encode("path", VMName), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachinesClient) DeleteSender(req *http.Request) (future VirtualMachinesDeleteFuture, err error) { + sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client)) + future.Future = azure.NewFuture(req) + future.req = req + _, err = future.Done(sender) + if err != nil { + return + } + err = autorest.Respond(future.Response(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent)) + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client VirtualMachinesClient) DeleteResponder(resp *http.Response) (result OperationStatusResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Generalize sets the state of the virtual machine to generalized. +// +// resourceGroupName is the name of the resource group. VMName is the name of the virtual machine. +func (client VirtualMachinesClient) Generalize(ctx context.Context, resourceGroupName string, VMName string) (result OperationStatusResponse, err error) { + req, err := client.GeneralizePreparer(ctx, resourceGroupName, VMName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Generalize", nil, "Failure preparing request") + return + } + + resp, err := client.GeneralizeSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Generalize", resp, "Failure sending request") + return + } + + result, err = client.GeneralizeResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Generalize", resp, "Failure responding to request") + } + + return +} + +// GeneralizePreparer prepares the Generalize request. 
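Generalize only flips VM state, so in practice it sits between the deallocate and capture operations defined earlier in this file. A workflow sketch under that assumption: the VHD prefix and container name are placeholders, and `to.StringPtr`/`to.BoolPtr` come from the vendored `autorest/to` package.

```go
package sketch

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
	"github.com/Azure/go-autorest/autorest/to"
)

// captureImage runs the deallocate -> generalize -> capture sequence the doc
// comments describe, blocking on each long-running step.
func captureImage(ctx context.Context, vmClient compute.VirtualMachinesClient, rg, vmName string) error {
	dealloc, err := vmClient.Deallocate(ctx, rg, vmName)
	if err != nil {
		return err
	}
	if err := dealloc.WaitForCompletion(ctx, vmClient.Client); err != nil {
		return err
	}
	if _, err := vmClient.Generalize(ctx, rg, vmName); err != nil {
		return err
	}
	capture, err := vmClient.Capture(ctx, rg, vmName, compute.VirtualMachineCaptureParameters{
		VhdPrefix:                to.StringPtr("minikube-template"), // placeholder
		DestinationContainerName: to.StringPtr("vhds"),              // placeholder
		OverwriteVhds:            to.BoolPtr(true),
	})
	if err != nil {
		return err
	}
	return capture.WaitForCompletion(ctx, vmClient.Client)
}
```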
+func (client VirtualMachinesClient) GeneralizePreparer(ctx context.Context, resourceGroupName string, VMName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmName": autorest.Encode("path", VMName), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/generalize", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GeneralizeSender sends the Generalize request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachinesClient) GeneralizeSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// GeneralizeResponder handles the response to the Generalize request. The method always +// closes the http.Response Body. +func (client VirtualMachinesClient) GeneralizeResponder(resp *http.Response) (result OperationStatusResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Get retrieves information about the model view or the instance view of a virtual machine. +// +// resourceGroupName is the name of the resource group. VMName is the name of the virtual machine. expand is the expand +// expression to apply on the operation. +func (client VirtualMachinesClient) Get(ctx context.Context, resourceGroupName string, VMName string, expand InstanceViewTypes) (result VirtualMachine, err error) { + req, err := client.GetPreparer(ctx, resourceGroupName, VMName, expand) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client VirtualMachinesClient) GetPreparer(ctx context.Context, resourceGroupName string, VMName string, expand InstanceViewTypes) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmName": autorest.Encode("path", VMName), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(string(expand)) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachinesClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client VirtualMachinesClient) GetResponder(resp *http.Response) (result VirtualMachine, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// InstanceView retrieves information about the run-time state of a virtual machine. +// +// resourceGroupName is the name of the resource group. VMName is the name of the virtual machine. +func (client VirtualMachinesClient) InstanceView(ctx context.Context, resourceGroupName string, VMName string) (result VirtualMachineInstanceView, err error) { + req, err := client.InstanceViewPreparer(ctx, resourceGroupName, VMName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "InstanceView", nil, "Failure preparing request") + return + } + + resp, err := client.InstanceViewSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "InstanceView", resp, "Failure sending request") + return + } + + result, err = client.InstanceViewResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "InstanceView", resp, "Failure responding to request") + } + + return +} + +// InstanceViewPreparer prepares the InstanceView request. 
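Because expand is optional, Get can return either the bare model or the model plus instance view; passing `compute.InstanceView` (the sole `InstanceViewTypes` value) folds the separate InstanceView call below into one request. A sketch that scans the returned status codes for the power state:

```go
package sketch

import (
	"context"
	"fmt"
	"strings"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
)

// powerState extracts e.g. "running" from a status code like "PowerState/running".
func powerState(ctx context.Context, vmClient compute.VirtualMachinesClient, rg, name string) (string, error) {
	vm, err := vmClient.Get(ctx, rg, name, compute.InstanceView)
	if err != nil {
		return "", err
	}
	props := vm.VirtualMachineProperties
	if props == nil || props.InstanceView == nil || props.InstanceView.Statuses == nil {
		return "", fmt.Errorf("no instance view returned for %q", name)
	}
	for _, s := range *props.InstanceView.Statuses {
		if s.Code != nil && strings.HasPrefix(*s.Code, "PowerState/") {
			return strings.TrimPrefix(*s.Code, "PowerState/"), nil
		}
	}
	return "", fmt.Errorf("no power state in instance view for %q", name)
}
```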
+func (client VirtualMachinesClient) InstanceViewPreparer(ctx context.Context, resourceGroupName string, VMName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmName": autorest.Encode("path", VMName), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/instanceView", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// InstanceViewSender sends the InstanceView request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachinesClient) InstanceViewSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// InstanceViewResponder handles the response to the InstanceView request. The method always +// closes the http.Response Body. +func (client VirtualMachinesClient) InstanceViewResponder(resp *http.Response) (result VirtualMachineInstanceView, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List lists all of the virtual machines in the specified resource group. Use the nextLink property in the response to +// get the next page of virtual machines. +// +// resourceGroupName is the name of the resource group. +func (client VirtualMachinesClient) List(ctx context.Context, resourceGroupName string) (result VirtualMachineListResultPage, err error) { + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.vmlr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "List", resp, "Failure sending request") + return + } + + result.vmlr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client VirtualMachinesClient) ListPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachinesClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client VirtualMachinesClient) ListResponder(resp *http.Response) (result VirtualMachineListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. +func (client VirtualMachinesClient) listNextResults(lastResults VirtualMachineListResult) (result VirtualMachineListResult, err error) { + req, err := lastResults.virtualMachineListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client VirtualMachinesClient) ListComplete(ctx context.Context, resourceGroupName string) (result VirtualMachineListResultIterator, err error) { + result.page, err = client.List(ctx, resourceGroupName) + return +} + +// ListAll lists all of the virtual machines in the specified subscription. Use the nextLink property in the response +// to get the next page of virtual machines. 
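Alongside the page type and `listNextResults` above, the generated iterator returned by `ListComplete` hides the nextLink bookkeeping entirely. A sketch, again with the client assumed to be authorized:

```go
package sketch

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
)

// listVMNames collects the VM names in one resource group, letting the
// iterator cross page boundaries transparently.
func listVMNames(ctx context.Context, vmClient compute.VirtualMachinesClient, rg string) ([]string, error) {
	var names []string
	it, err := vmClient.ListComplete(ctx, rg)
	for ; err == nil && it.NotDone(); err = it.Next() {
		if n := it.Value().Name; n != nil {
			names = append(names, *n)
		}
	}
	return names, err
}
```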
+func (client VirtualMachinesClient) ListAll(ctx context.Context) (result VirtualMachineListResultPage, err error) { + result.fn = client.listAllNextResults + req, err := client.ListAllPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ListAll", nil, "Failure preparing request") + return + } + + resp, err := client.ListAllSender(req) + if err != nil { + result.vmlr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ListAll", resp, "Failure sending request") + return + } + + result.vmlr, err = client.ListAllResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ListAll", resp, "Failure responding to request") + } + + return +} + +// ListAllPreparer prepares the ListAll request. +func (client VirtualMachinesClient) ListAllPreparer(ctx context.Context) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/virtualMachines", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListAllSender sends the ListAll request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachinesClient) ListAllSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListAllResponder handles the response to the ListAll request. The method always +// closes the http.Response Body. +func (client VirtualMachinesClient) ListAllResponder(resp *http.Response) (result VirtualMachineListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listAllNextResults retrieves the next set of results, if any. +func (client VirtualMachinesClient) listAllNextResults(lastResults VirtualMachineListResult) (result VirtualMachineListResult, err error) { + req, err := lastResults.virtualMachineListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "listAllNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListAllSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "listAllNextResults", resp, "Failure sending next results request") + } + result, err = client.ListAllResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "listAllNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListAllComplete enumerates all values, automatically crossing page boundaries as required. 
+func (client VirtualMachinesClient) ListAllComplete(ctx context.Context) (result VirtualMachineListResultIterator, err error) { + result.page, err = client.ListAll(ctx) + return +} + +// ListAvailableSizes lists all available virtual machine sizes to which the specified virtual machine can be resized. +// +// resourceGroupName is the name of the resource group. VMName is the name of the virtual machine. +func (client VirtualMachinesClient) ListAvailableSizes(ctx context.Context, resourceGroupName string, VMName string) (result VirtualMachineSizeListResult, err error) { + req, err := client.ListAvailableSizesPreparer(ctx, resourceGroupName, VMName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ListAvailableSizes", nil, "Failure preparing request") + return + } + + resp, err := client.ListAvailableSizesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ListAvailableSizes", resp, "Failure sending request") + return + } + + result, err = client.ListAvailableSizesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "ListAvailableSizes", resp, "Failure responding to request") + } + + return +} + +// ListAvailableSizesPreparer prepares the ListAvailableSizes request. +func (client VirtualMachinesClient) ListAvailableSizesPreparer(ctx context.Context, resourceGroupName string, VMName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmName": autorest.Encode("path", VMName), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/vmSizes", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListAvailableSizesSender sends the ListAvailableSizes request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachinesClient) ListAvailableSizesSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListAvailableSizesResponder handles the response to the ListAvailableSizes request. The method always +// closes the http.Response Body. +func (client VirtualMachinesClient) ListAvailableSizesResponder(resp *http.Response) (result VirtualMachineSizeListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// PerformMaintenance the operation to perform maintenance on a virtual machine. +// +// resourceGroupName is the name of the resource group. VMName is the name of the virtual machine. 
+func (client VirtualMachinesClient) PerformMaintenance(ctx context.Context, resourceGroupName string, VMName string) (result VirtualMachinesPerformMaintenanceFuture, err error) { + req, err := client.PerformMaintenancePreparer(ctx, resourceGroupName, VMName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "PerformMaintenance", nil, "Failure preparing request") + return + } + + result, err = client.PerformMaintenanceSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "PerformMaintenance", result.Response(), "Failure sending request") + return + } + + return +} + +// PerformMaintenancePreparer prepares the PerformMaintenance request. +func (client VirtualMachinesClient) PerformMaintenancePreparer(ctx context.Context, resourceGroupName string, VMName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmName": autorest.Encode("path", VMName), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/performMaintenance", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// PerformMaintenanceSender sends the PerformMaintenance request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachinesClient) PerformMaintenanceSender(req *http.Request) (future VirtualMachinesPerformMaintenanceFuture, err error) { + sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client)) + future.Future = azure.NewFuture(req) + future.req = req + _, err = future.Done(sender) + if err != nil { + return + } + err = autorest.Respond(future.Response(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) + return +} + +// PerformMaintenanceResponder handles the response to the PerformMaintenance request. The method always +// closes the http.Response Body. +func (client VirtualMachinesClient) PerformMaintenanceResponder(resp *http.Response) (result OperationStatusResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// PowerOff the operation to power off (stop) a virtual machine. The virtual machine can be restarted with the same +// provisioned resources. You are still charged for this virtual machine. +// +// resourceGroupName is the name of the resource group. VMName is the name of the virtual machine. 
+func (client VirtualMachinesClient) PowerOff(ctx context.Context, resourceGroupName string, VMName string) (result VirtualMachinesPowerOffFuture, err error) { + req, err := client.PowerOffPreparer(ctx, resourceGroupName, VMName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "PowerOff", nil, "Failure preparing request") + return + } + + result, err = client.PowerOffSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "PowerOff", result.Response(), "Failure sending request") + return + } + + return +} + +// PowerOffPreparer prepares the PowerOff request. +func (client VirtualMachinesClient) PowerOffPreparer(ctx context.Context, resourceGroupName string, VMName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmName": autorest.Encode("path", VMName), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/powerOff", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// PowerOffSender sends the PowerOff request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachinesClient) PowerOffSender(req *http.Request) (future VirtualMachinesPowerOffFuture, err error) { + sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client)) + future.Future = azure.NewFuture(req) + future.req = req + _, err = future.Done(sender) + if err != nil { + return + } + err = autorest.Respond(future.Response(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) + return +} + +// PowerOffResponder handles the response to the PowerOff request. The method always +// closes the http.Response Body. +func (client VirtualMachinesClient) PowerOffResponder(resp *http.Response) (result OperationStatusResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Redeploy the operation to redeploy a virtual machine. +// +// resourceGroupName is the name of the resource group. VMName is the name of the virtual machine. +func (client VirtualMachinesClient) Redeploy(ctx context.Context, resourceGroupName string, VMName string) (result VirtualMachinesRedeployFuture, err error) { + req, err := client.RedeployPreparer(ctx, resourceGroupName, VMName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Redeploy", nil, "Failure preparing request") + return + } + + result, err = client.RedeploySender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Redeploy", result.Response(), "Failure sending request") + return + } + + return +} + +// RedeployPreparer prepares the Redeploy request. 
+func (client VirtualMachinesClient) RedeployPreparer(ctx context.Context, resourceGroupName string, VMName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmName": autorest.Encode("path", VMName), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/redeploy", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// RedeploySender sends the Redeploy request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachinesClient) RedeploySender(req *http.Request) (future VirtualMachinesRedeployFuture, err error) { + sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client)) + future.Future = azure.NewFuture(req) + future.req = req + _, err = future.Done(sender) + if err != nil { + return + } + err = autorest.Respond(future.Response(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) + return +} + +// RedeployResponder handles the response to the Redeploy request. The method always +// closes the http.Response Body. +func (client VirtualMachinesClient) RedeployResponder(resp *http.Response) (result OperationStatusResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Restart the operation to restart a virtual machine. +// +// resourceGroupName is the name of the resource group. VMName is the name of the virtual machine. +func (client VirtualMachinesClient) Restart(ctx context.Context, resourceGroupName string, VMName string) (result VirtualMachinesRestartFuture, err error) { + req, err := client.RestartPreparer(ctx, resourceGroupName, VMName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Restart", nil, "Failure preparing request") + return + } + + result, err = client.RestartSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Restart", result.Response(), "Failure sending request") + return + } + + return +} + +// RestartPreparer prepares the Restart request. 
+func (client VirtualMachinesClient) RestartPreparer(ctx context.Context, resourceGroupName string, VMName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmName": autorest.Encode("path", VMName), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/restart", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// RestartSender sends the Restart request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachinesClient) RestartSender(req *http.Request) (future VirtualMachinesRestartFuture, err error) { + sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client)) + future.Future = azure.NewFuture(req) + future.req = req + _, err = future.Done(sender) + if err != nil { + return + } + err = autorest.Respond(future.Response(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) + return +} + +// RestartResponder handles the response to the Restart request. The method always +// closes the http.Response Body. +func (client VirtualMachinesClient) RestartResponder(resp *http.Response) (result OperationStatusResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// RunCommand run command on the VM. +// +// resourceGroupName is the name of the resource group. VMName is the name of the virtual machine. parameters is +// parameters supplied to the Run command operation. +func (client VirtualMachinesClient) RunCommand(ctx context.Context, resourceGroupName string, VMName string, parameters RunCommandInput) (result VirtualMachinesRunCommandFuture, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.CommandID", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "compute.VirtualMachinesClient", "RunCommand") + } + + req, err := client.RunCommandPreparer(ctx, resourceGroupName, VMName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "RunCommand", nil, "Failure preparing request") + return + } + + result, err = client.RunCommandSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "RunCommand", result.Response(), "Failure sending request") + return + } + + return +} + +// RunCommandPreparer prepares the RunCommand request. 
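RunCommand ties the validated `CommandID` back to the run-commands catalog served by the `VirtualMachineRunCommandsClient` earlier in this diff. A sketch invoking the built-in `RunShellScript` command; the script line is a placeholder, and `WaitForCompletion`/`Result` are the same vintage helpers used above:

```go
package sketch

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
	"github.com/Azure/go-autorest/autorest/to"
)

// runShellScript fires the built-in RunShellScript command on a VM and
// blocks on the returned future.
func runShellScript(ctx context.Context, vmClient compute.VirtualMachinesClient, rg, vmName string) error {
	input := compute.RunCommandInput{
		CommandID: to.StringPtr("RunShellScript"), // the one field validated above
		Script:    &[]string{"echo hello from $(hostname)"},
	}
	future, err := vmClient.RunCommand(ctx, rg, vmName, input)
	if err != nil {
		return err
	}
	if err := future.WaitForCompletion(ctx, vmClient.Client); err != nil {
		return err
	}
	result, err := future.Result(vmClient)
	if err != nil {
		return err
	}
	fmt.Printf("run command finished: %+v\n", result)
	return nil
}
```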
+func (client VirtualMachinesClient) RunCommandPreparer(ctx context.Context, resourceGroupName string, VMName string, parameters RunCommandInput) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmName": autorest.Encode("path", VMName), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/runCommand", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// RunCommandSender sends the RunCommand request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachinesClient) RunCommandSender(req *http.Request) (future VirtualMachinesRunCommandFuture, err error) { + sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client)) + future.Future = azure.NewFuture(req) + future.req = req + _, err = future.Done(sender) + if err != nil { + return + } + err = autorest.Respond(future.Response(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) + return +} + +// RunCommandResponder handles the response to the RunCommand request. The method always +// closes the http.Response Body. +func (client VirtualMachinesClient) RunCommandResponder(resp *http.Response) (result RunCommandResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Start the operation to start a virtual machine. +// +// resourceGroupName is the name of the resource group. VMName is the name of the virtual machine. +func (client VirtualMachinesClient) Start(ctx context.Context, resourceGroupName string, VMName string) (result VirtualMachinesStartFuture, err error) { + req, err := client.StartPreparer(ctx, resourceGroupName, VMName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Start", nil, "Failure preparing request") + return + } + + result, err = client.StartSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "Start", result.Response(), "Failure sending request") + return + } + + return +} + +// StartPreparer prepares the Start request. 
+func (client VirtualMachinesClient) StartPreparer(ctx context.Context, resourceGroupName string, VMName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmName": autorest.Encode("path", VMName), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/start", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// StartSender sends the Start request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachinesClient) StartSender(req *http.Request) (future VirtualMachinesStartFuture, err error) { + sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client)) + future.Future = azure.NewFuture(req) + future.req = req + _, err = future.Done(sender) + if err != nil { + return + } + err = autorest.Respond(future.Response(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) + return +} + +// StartResponder handles the response to the Start request. The method always +// closes the http.Response Body. +func (client VirtualMachinesClient) StartResponder(resp *http.Response) (result OperationStatusResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachinescalesetextensions.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachinescalesetextensions.go new file mode 100644 index 000000000000..d27a62b7954f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachinescalesetextensions.go @@ -0,0 +1,351 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
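All of the generated Sender methods in this diff follow the same long-running-operation shape: the Sender wraps the initial response in an azure.Future, and the caller later feeds the completed future's response to the matching Responder. As a minimal caller-side sketch (assuming the package's generated NewVirtualMachinesClient constructor, an authorizer obtained elsewhere, and a WaitForCompletion helper on azure.Future in the pinned go-autorest release):

```go
package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
	"github.com/Azure/go-autorest/autorest"
)

// startVM drives the Start operation end to end: send, poll to completion,
// then decode the terminal response with the matching Responder.
func startVM(ctx context.Context, subscriptionID, resourceGroup, vmName string, authorizer autorest.Authorizer) error {
	client := compute.NewVirtualMachinesClient(subscriptionID)
	// The authorizer (e.g. autorest.NewBearerAuthorizer around an adal
	// service principal token) is a placeholder obtained elsewhere.
	client.Authorizer = authorizer

	future, err := client.Start(ctx, resourceGroup, vmName)
	if err != nil {
		return err
	}
	// WaitForCompletion is assumed from azure.Future in the pinned
	// go-autorest release; it polls until a terminal state is reached.
	if err := future.WaitForCompletion(ctx, client.Client); err != nil {
		return err
	}
	_, err = client.StartResponder(future.Response())
	return err
}
```

The same wait-then-respond sequence applies to the Redeploy and Restart futures above.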
+ +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// VirtualMachineScaleSetExtensionsClient is the compute Client +type VirtualMachineScaleSetExtensionsClient struct { + BaseClient +} + +// NewVirtualMachineScaleSetExtensionsClient creates an instance of the VirtualMachineScaleSetExtensionsClient client. +func NewVirtualMachineScaleSetExtensionsClient(subscriptionID string) VirtualMachineScaleSetExtensionsClient { + return NewVirtualMachineScaleSetExtensionsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewVirtualMachineScaleSetExtensionsClientWithBaseURI creates an instance of the +// VirtualMachineScaleSetExtensionsClient client. +func NewVirtualMachineScaleSetExtensionsClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineScaleSetExtensionsClient { + return VirtualMachineScaleSetExtensionsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate the operation to create or update an extension. +// +// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set where the +// extension should be created or updated. vmssExtensionName is the name of the VM scale set extension. +// extensionParameters is parameters supplied to the Create VM scale set Extension operation. +func (client VirtualMachineScaleSetExtensionsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, VMScaleSetName string, vmssExtensionName string, extensionParameters VirtualMachineScaleSetExtension) (result VirtualMachineScaleSetExtensionsCreateOrUpdateFuture, err error) { + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, VMScaleSetName, vmssExtensionName, extensionParameters) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result, err = client.CreateOrUpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsClient", "CreateOrUpdate", result.Response(), "Failure sending request") + return + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client VirtualMachineScaleSetExtensionsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, vmssExtensionName string, extensionParameters VirtualMachineScaleSetExtension) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), + "vmssExtensionName": autorest.Encode("path", vmssExtensionName), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensions/{vmssExtensionName}", pathParameters), + autorest.WithJSON(extensionParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request.
The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetExtensionsClient) CreateOrUpdateSender(req *http.Request) (future VirtualMachineScaleSetExtensionsCreateOrUpdateFuture, err error) { + sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client)) + future.Future = azure.NewFuture(req) + future.req = req + _, err = future.Done(sender) + if err != nil { + return + } + err = autorest.Respond(future.Response(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated)) + return +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetExtensionsClient) CreateOrUpdateResponder(resp *http.Response) (result VirtualMachineScaleSetExtension, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete the operation to delete the extension. +// +// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set where the +// extension should be deleted. vmssExtensionName is the name of the VM scale set extension. +func (client VirtualMachineScaleSetExtensionsClient) Delete(ctx context.Context, resourceGroupName string, VMScaleSetName string, vmssExtensionName string) (result VirtualMachineScaleSetExtensionsDeleteFuture, err error) { + req, err := client.DeletePreparer(ctx, resourceGroupName, VMScaleSetName, vmssExtensionName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client VirtualMachineScaleSetExtensionsClient) DeletePreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, vmssExtensionName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), + "vmssExtensionName": autorest.Encode("path", vmssExtensionName), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensions/{vmssExtensionName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. 
+func (client VirtualMachineScaleSetExtensionsClient) DeleteSender(req *http.Request) (future VirtualMachineScaleSetExtensionsDeleteFuture, err error) { + sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client)) + future.Future = azure.NewFuture(req) + future.req = req + _, err = future.Done(sender) + if err != nil { + return + } + err = autorest.Respond(future.Response(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent)) + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetExtensionsClient) DeleteResponder(resp *http.Response) (result OperationStatusResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Get the operation to get the extension. +// +// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set containing the +// extension. vmssExtensionName is the name of the VM scale set extension. expand is the expand expression to apply on +// the operation. +func (client VirtualMachineScaleSetExtensionsClient) Get(ctx context.Context, resourceGroupName string, VMScaleSetName string, vmssExtensionName string, expand string) (result VirtualMachineScaleSetExtension, err error) { + req, err := client.GetPreparer(ctx, resourceGroupName, VMScaleSetName, vmssExtensionName, expand) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client VirtualMachineScaleSetExtensionsClient) GetPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, vmssExtensionName string, expand string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), + "vmssExtensionName": autorest.Encode("path", vmssExtensionName), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensions/{vmssExtensionName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. 
The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetExtensionsClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetExtensionsClient) GetResponder(resp *http.Response) (result VirtualMachineScaleSetExtension, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets a list of all extensions in a VM scale set. +// +// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set containing the +// extension. +func (client VirtualMachineScaleSetExtensionsClient) List(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result VirtualMachineScaleSetExtensionListResultPage, err error) { + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, resourceGroupName, VMScaleSetName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.vmsselr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsClient", "List", resp, "Failure sending request") + return + } + + result.vmsselr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client VirtualMachineScaleSetExtensionsClient) ListPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/extensions", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetExtensionsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. 
+func (client VirtualMachineScaleSetExtensionsClient) ListResponder(resp *http.Response) (result VirtualMachineScaleSetExtensionListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. +func (client VirtualMachineScaleSetExtensionsClient) listNextResults(lastResults VirtualMachineScaleSetExtensionListResult) (result VirtualMachineScaleSetExtensionListResult, err error) { + req, err := lastResults.virtualMachineScaleSetExtensionListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetExtensionsClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client VirtualMachineScaleSetExtensionsClient) ListComplete(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result VirtualMachineScaleSetExtensionListResultIterator, err error) { + result.page, err = client.List(ctx, resourceGroupName, VMScaleSetName) + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachinescalesetrollingupgrades.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachinescalesetrollingupgrades.go new file mode 100644 index 000000000000..6e7b66cb06e9 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachinescalesetrollingupgrades.go @@ -0,0 +1,245 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// VirtualMachineScaleSetRollingUpgradesClient is the compute Client +type VirtualMachineScaleSetRollingUpgradesClient struct { + BaseClient +} + +// NewVirtualMachineScaleSetRollingUpgradesClient creates an instance of the +// VirtualMachineScaleSetRollingUpgradesClient client. 
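The List/listNextResults/ListComplete trio in the extensions client just above implements the generated pager: List returns a single page, listNextResults prepares the request for the next page from the previous result, and ListComplete wraps both behind an iterator. A sketch of consuming it, assuming the iterator's usual NotDone/Value/Next methods and a Name *string field on the generated extension model:

```go
package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
)

// listExtensions prints every extension in a scale set; Next transparently
// crosses page boundaries through listNextResults.
func listExtensions(ctx context.Context, client compute.VirtualMachineScaleSetExtensionsClient, resourceGroup, scaleSetName string) error {
	iter, err := client.ListComplete(ctx, resourceGroup, scaleSetName)
	if err != nil {
		return err
	}
	for iter.NotDone() {
		ext := iter.Value() // Name *string is assumed on the generated model
		if ext.Name != nil {
			fmt.Println(*ext.Name)
		}
		if err := iter.Next(); err != nil {
			return err
		}
	}
	return nil
}
```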
+func NewVirtualMachineScaleSetRollingUpgradesClient(subscriptionID string) VirtualMachineScaleSetRollingUpgradesClient { + return NewVirtualMachineScaleSetRollingUpgradesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewVirtualMachineScaleSetRollingUpgradesClientWithBaseURI creates an instance of the +// VirtualMachineScaleSetRollingUpgradesClient client. +func NewVirtualMachineScaleSetRollingUpgradesClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineScaleSetRollingUpgradesClient { + return VirtualMachineScaleSetRollingUpgradesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Cancel cancels the current virtual machine scale set rolling upgrade. +// +// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set. +func (client VirtualMachineScaleSetRollingUpgradesClient) Cancel(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result VirtualMachineScaleSetRollingUpgradesCancelFuture, err error) { + req, err := client.CancelPreparer(ctx, resourceGroupName, VMScaleSetName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetRollingUpgradesClient", "Cancel", nil, "Failure preparing request") + return + } + + result, err = client.CancelSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetRollingUpgradesClient", "Cancel", result.Response(), "Failure sending request") + return + } + + return +} + +// CancelPreparer prepares the Cancel request. +func (client VirtualMachineScaleSetRollingUpgradesClient) CancelPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/rollingUpgrades/cancel", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CancelSender sends the Cancel request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetRollingUpgradesClient) CancelSender(req *http.Request) (future VirtualMachineScaleSetRollingUpgradesCancelFuture, err error) { + sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client)) + future.Future = azure.NewFuture(req) + future.req = req + _, err = future.Done(sender) + if err != nil { + return + } + err = autorest.Respond(future.Response(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) + return +} + +// CancelResponder handles the response to the Cancel request. The method always +// closes the http.Response Body. 
+func (client VirtualMachineScaleSetRollingUpgradesClient) CancelResponder(resp *http.Response) (result OperationStatusResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetLatest gets the status of the latest virtual machine scale set rolling upgrade. +// +// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set. +func (client VirtualMachineScaleSetRollingUpgradesClient) GetLatest(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result RollingUpgradeStatusInfo, err error) { + req, err := client.GetLatestPreparer(ctx, resourceGroupName, VMScaleSetName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetRollingUpgradesClient", "GetLatest", nil, "Failure preparing request") + return + } + + resp, err := client.GetLatestSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetRollingUpgradesClient", "GetLatest", resp, "Failure sending request") + return + } + + result, err = client.GetLatestResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetRollingUpgradesClient", "GetLatest", resp, "Failure responding to request") + } + + return +} + +// GetLatestPreparer prepares the GetLatest request. +func (client VirtualMachineScaleSetRollingUpgradesClient) GetLatestPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/rollingUpgrades/latest", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetLatestSender sends the GetLatest request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetRollingUpgradesClient) GetLatestSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// GetLatestResponder handles the response to the GetLatest request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetRollingUpgradesClient) GetLatestResponder(resp *http.Response) (result RollingUpgradeStatusInfo, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// StartOSUpgrade starts a rolling upgrade to move all virtual machine scale set instances to the latest available +// Platform Image OS version. 
Instances which are already running the latest available OS version are not affected. +// +// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set. +func (client VirtualMachineScaleSetRollingUpgradesClient) StartOSUpgrade(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture, err error) { + req, err := client.StartOSUpgradePreparer(ctx, resourceGroupName, VMScaleSetName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetRollingUpgradesClient", "StartOSUpgrade", nil, "Failure preparing request") + return + } + + result, err = client.StartOSUpgradeSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetRollingUpgradesClient", "StartOSUpgrade", result.Response(), "Failure sending request") + return + } + + return +} + +// StartOSUpgradePreparer prepares the StartOSUpgrade request. +func (client VirtualMachineScaleSetRollingUpgradesClient) StartOSUpgradePreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/osRollingUpgrade", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// StartOSUpgradeSender sends the StartOSUpgrade request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetRollingUpgradesClient) StartOSUpgradeSender(req *http.Request) (future VirtualMachineScaleSetRollingUpgradesStartOSUpgradeFuture, err error) { + sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client)) + future.Future = azure.NewFuture(req) + future.req = req + _, err = future.Done(sender) + if err != nil { + return + } + err = autorest.Respond(future.Response(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) + return +} + +// StartOSUpgradeResponder handles the response to the StartOSUpgrade request. The method always +// closes the http.Response Body. 
+func (client VirtualMachineScaleSetRollingUpgradesClient) StartOSUpgradeResponder(resp *http.Response) (result OperationStatusResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachinescalesets.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachinescalesets.go new file mode 100644 index 000000000000..9045380d1d1f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachinescalesets.go @@ -0,0 +1,1356 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "net/http" +) + +// VirtualMachineScaleSetsClient is the compute Client +type VirtualMachineScaleSetsClient struct { + BaseClient +} + +// NewVirtualMachineScaleSetsClient creates an instance of the VirtualMachineScaleSetsClient client. +func NewVirtualMachineScaleSetsClient(subscriptionID string) VirtualMachineScaleSetsClient { + return NewVirtualMachineScaleSetsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewVirtualMachineScaleSetsClientWithBaseURI creates an instance of the VirtualMachineScaleSetsClient client. +func NewVirtualMachineScaleSetsClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineScaleSetsClient { + return VirtualMachineScaleSetsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate create or update a VM scale set. +// +// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set to create or +// update. parameters is the scale set object. 
+func (client VirtualMachineScaleSetsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, VMScaleSetName string, parameters VirtualMachineScaleSet) (result VirtualMachineScaleSetsCreateOrUpdateFuture, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.VirtualMachineScaleSetProperties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.VirtualMachineScaleSetProperties.UpgradePolicy", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.VirtualMachineScaleSetProperties.UpgradePolicy.RollingUpgradePolicy", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.VirtualMachineScaleSetProperties.UpgradePolicy.RollingUpgradePolicy.MaxBatchInstancePercent", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.VirtualMachineScaleSetProperties.UpgradePolicy.RollingUpgradePolicy.MaxBatchInstancePercent", Name: validation.InclusiveMaximum, Rule: 100, Chain: nil}, + {Target: "parameters.VirtualMachineScaleSetProperties.UpgradePolicy.RollingUpgradePolicy.MaxBatchInstancePercent", Name: validation.InclusiveMinimum, Rule: 5, Chain: nil}, + }}, + {Target: "parameters.VirtualMachineScaleSetProperties.UpgradePolicy.RollingUpgradePolicy.MaxUnhealthyInstancePercent", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.VirtualMachineScaleSetProperties.UpgradePolicy.RollingUpgradePolicy.MaxUnhealthyInstancePercent", Name: validation.InclusiveMaximum, Rule: 100, Chain: nil}, + {Target: "parameters.VirtualMachineScaleSetProperties.UpgradePolicy.RollingUpgradePolicy.MaxUnhealthyInstancePercent", Name: validation.InclusiveMinimum, Rule: 5, Chain: nil}, + }}, + {Target: "parameters.VirtualMachineScaleSetProperties.UpgradePolicy.RollingUpgradePolicy.MaxUnhealthyUpgradedInstancePercent", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.VirtualMachineScaleSetProperties.UpgradePolicy.RollingUpgradePolicy.MaxUnhealthyUpgradedInstancePercent", Name: validation.InclusiveMaximum, Rule: 100, Chain: nil}, + {Target: "parameters.VirtualMachineScaleSetProperties.UpgradePolicy.RollingUpgradePolicy.MaxUnhealthyUpgradedInstancePercent", Name: validation.InclusiveMinimum, Rule: 0, Chain: nil}, + }}, + }}, + }}, + }}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "compute.VirtualMachineScaleSetsClient", "CreateOrUpdate") + } + + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, VMScaleSetName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result, err = client.CreateOrUpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "CreateOrUpdate", result.Response(), "Failure sending request") + return + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client VirtualMachineScaleSetsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, parameters VirtualMachineScaleSet) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetsClient) CreateOrUpdateSender(req *http.Request) (future VirtualMachineScaleSetsCreateOrUpdateFuture, err error) { + sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client)) + future.Future = azure.NewFuture(req) + future.req = req + _, err = future.Done(sender) + if err != nil { + return + } + err = autorest.Respond(future.Response(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated)) + return +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetsClient) CreateOrUpdateResponder(resp *http.Response) (result VirtualMachineScaleSet, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Deallocate deallocates specific virtual machines in a VM scale set. Shuts down the virtual machines and releases the +// compute resources. You are not billed for the compute resources that this virtual machine scale set deallocates. +// +// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set. VMInstanceIDs +// is a list of virtual machine instance IDs from the VM scale set. +func (client VirtualMachineScaleSetsClient) Deallocate(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs) (result VirtualMachineScaleSetsDeallocateFuture, err error) { + req, err := client.DeallocatePreparer(ctx, resourceGroupName, VMScaleSetName, VMInstanceIDs) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Deallocate", nil, "Failure preparing request") + return + } + + result, err = client.DeallocateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Deallocate", result.Response(), "Failure sending request") + return + } + + return +} + +// DeallocatePreparer prepares the Deallocate request. 
+func (client VirtualMachineScaleSetsClient) DeallocatePreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/deallocate", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if VMInstanceIDs != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithJSON(VMInstanceIDs)) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeallocateSender sends the Deallocate request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetsClient) DeallocateSender(req *http.Request) (future VirtualMachineScaleSetsDeallocateFuture, err error) { + sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client)) + future.Future = azure.NewFuture(req) + future.req = req + _, err = future.Done(sender) + if err != nil { + return + } + err = autorest.Respond(future.Response(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) + return +} + +// DeallocateResponder handles the response to the Deallocate request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetsClient) DeallocateResponder(resp *http.Response) (result OperationStatusResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a VM scale set. +// +// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set. +func (client VirtualMachineScaleSetsClient) Delete(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result VirtualMachineScaleSetsDeleteFuture, err error) { + req, err := client.DeletePreparer(ctx, resourceGroupName, VMScaleSetName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. 
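Note how DeallocatePreparer above only decorates the request with a JSON body when VMInstanceIDs is non-nil; passing nil therefore sends a body-less POST, which the service treats as deallocating the entire scale set. A sketch of both call shapes (the InstanceIds *[]string field on the generated model and the WaitForCompletion helper are assumptions):

```go
package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
)

// deallocate covers both call shapes: a nil VMInstanceIDs sends no body and
// targets the whole scale set; a populated one targets specific instances.
func deallocate(ctx context.Context, client compute.VirtualMachineScaleSetsClient, resourceGroup, scaleSetName string, instanceIDs []string) error {
	var ids *compute.VirtualMachineScaleSetVMInstanceIDs
	if len(instanceIDs) > 0 {
		// InstanceIds is assumed to be *[]string on the generated model.
		ids = &compute.VirtualMachineScaleSetVMInstanceIDs{InstanceIds: &instanceIDs}
	}
	future, err := client.Deallocate(ctx, resourceGroup, scaleSetName, ids)
	if err != nil {
		return err
	}
	// Same wait-then-respond sequence as in the earlier Start sketch.
	if err := future.WaitForCompletion(ctx, client.Client); err != nil {
		return err
	}
	_, err = client.DeallocateResponder(future.Response())
	return err
}
```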
+func (client VirtualMachineScaleSetsClient) DeletePreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetsClient) DeleteSender(req *http.Request) (future VirtualMachineScaleSetsDeleteFuture, err error) { + sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client)) + future.Future = azure.NewFuture(req) + future.req = req + _, err = future.Done(sender) + if err != nil { + return + } + err = autorest.Respond(future.Response(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent)) + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetsClient) DeleteResponder(resp *http.Response) (result OperationStatusResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// DeleteInstances deletes virtual machines in a VM scale set. +// +// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set. VMInstanceIDs +// is a list of virtual machine instance IDs from the VM scale set. +func (client VirtualMachineScaleSetsClient) DeleteInstances(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs VirtualMachineScaleSetVMInstanceRequiredIDs) (result VirtualMachineScaleSetsDeleteInstancesFuture, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: VMInstanceIDs, + Constraints: []validation.Constraint{{Target: "VMInstanceIDs.InstanceIds", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "compute.VirtualMachineScaleSetsClient", "DeleteInstances") + } + + req, err := client.DeleteInstancesPreparer(ctx, resourceGroupName, VMScaleSetName, VMInstanceIDs) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "DeleteInstances", nil, "Failure preparing request") + return + } + + result, err = client.DeleteInstancesSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "DeleteInstances", result.Response(), "Failure sending request") + return + } + + return +} + +// DeleteInstancesPreparer prepares the DeleteInstances request. 
+func (client VirtualMachineScaleSetsClient) DeleteInstancesPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs VirtualMachineScaleSetVMInstanceRequiredIDs) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/delete", pathParameters), + autorest.WithJSON(VMInstanceIDs), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteInstancesSender sends the DeleteInstances request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetsClient) DeleteInstancesSender(req *http.Request) (future VirtualMachineScaleSetsDeleteInstancesFuture, err error) { + sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client)) + future.Future = azure.NewFuture(req) + future.req = req + _, err = future.Done(sender) + if err != nil { + return + } + err = autorest.Respond(future.Response(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) + return +} + +// DeleteInstancesResponder handles the response to the DeleteInstances request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetsClient) DeleteInstancesResponder(resp *http.Response) (result OperationStatusResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ForceRecoveryServiceFabricPlatformUpdateDomainWalk performs a manual platform update domain walk to update virtual +// machines in a service fabric virtual machine scale set. +// +// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set.
+// platformUpdateDomain is the platform update domain for which a manual recovery walk is requested +func (client VirtualMachineScaleSetsClient) ForceRecoveryServiceFabricPlatformUpdateDomainWalk(ctx context.Context, resourceGroupName string, VMScaleSetName string, platformUpdateDomain int32) (result RecoveryWalkResponse, err error) { + req, err := client.ForceRecoveryServiceFabricPlatformUpdateDomainWalkPreparer(ctx, resourceGroupName, VMScaleSetName, platformUpdateDomain) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ForceRecoveryServiceFabricPlatformUpdateDomainWalk", nil, "Failure preparing request") + return + } + + resp, err := client.ForceRecoveryServiceFabricPlatformUpdateDomainWalkSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ForceRecoveryServiceFabricPlatformUpdateDomainWalk", resp, "Failure sending request") + return + } + + result, err = client.ForceRecoveryServiceFabricPlatformUpdateDomainWalkResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ForceRecoveryServiceFabricPlatformUpdateDomainWalk", resp, "Failure responding to request") + } + + return +} + +// ForceRecoveryServiceFabricPlatformUpdateDomainWalkPreparer prepares the ForceRecoveryServiceFabricPlatformUpdateDomainWalk request. +func (client VirtualMachineScaleSetsClient) ForceRecoveryServiceFabricPlatformUpdateDomainWalkPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, platformUpdateDomain int32) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + "platformUpdateDomain": autorest.Encode("query", platformUpdateDomain), + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/forceRecoveryServiceFabricPlatformUpdateDomainWalk", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ForceRecoveryServiceFabricPlatformUpdateDomainWalkSender sends the ForceRecoveryServiceFabricPlatformUpdateDomainWalk request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetsClient) ForceRecoveryServiceFabricPlatformUpdateDomainWalkSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ForceRecoveryServiceFabricPlatformUpdateDomainWalkResponder handles the response to the ForceRecoveryServiceFabricPlatformUpdateDomainWalk request. The method always +// closes the http.Response Body. 
+func (client VirtualMachineScaleSetsClient) ForceRecoveryServiceFabricPlatformUpdateDomainWalkResponder(resp *http.Response) (result RecoveryWalkResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Get display information about a virtual machine scale set. +// +// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set. +func (client VirtualMachineScaleSetsClient) Get(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result VirtualMachineScaleSet, err error) { + req, err := client.GetPreparer(ctx, resourceGroupName, VMScaleSetName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client VirtualMachineScaleSetsClient) GetPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetsClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetsClient) GetResponder(resp *http.Response) (result VirtualMachineScaleSet, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetInstanceView gets the status of a VM scale set instance. +// +// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set. 
+func (client VirtualMachineScaleSetsClient) GetInstanceView(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result VirtualMachineScaleSetInstanceView, err error) { + req, err := client.GetInstanceViewPreparer(ctx, resourceGroupName, VMScaleSetName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "GetInstanceView", nil, "Failure preparing request") + return + } + + resp, err := client.GetInstanceViewSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "GetInstanceView", resp, "Failure sending request") + return + } + + result, err = client.GetInstanceViewResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "GetInstanceView", resp, "Failure responding to request") + } + + return +} + +// GetInstanceViewPreparer prepares the GetInstanceView request. +func (client VirtualMachineScaleSetsClient) GetInstanceViewPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/instanceView", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetInstanceViewSender sends the GetInstanceView request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetsClient) GetInstanceViewSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// GetInstanceViewResponder handles the response to the GetInstanceView request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetsClient) GetInstanceViewResponder(resp *http.Response) (result VirtualMachineScaleSetInstanceView, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets a list of all VM scale sets under a resource group. +// +// resourceGroupName is the name of the resource group. 
+func (client VirtualMachineScaleSetsClient) List(ctx context.Context, resourceGroupName string) (result VirtualMachineScaleSetListResultPage, err error) { + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.vmsslr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "List", resp, "Failure sending request") + return + } + + result.vmsslr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client VirtualMachineScaleSetsClient) ListPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetsClient) ListResponder(resp *http.Response) (result VirtualMachineScaleSetListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. 
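List returns a VirtualMachineScaleSetListResultPage rather than a plain slice. A sketch of walking the pages follows; it assumes the NotDone/Values/Next methods this generator emits on Page types, which are defined in the package's models file outside this hunk.

package vmssexamples

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
)

// printAllScaleSets walks every page in the resource group. Next fetches the
// following page via listNextResults (shown below) until the service stops
// returning a nextLink.
func printAllScaleSets(ctx context.Context, client compute.VirtualMachineScaleSetsClient, resourceGroup string) error {
	page, err := client.List(ctx, resourceGroup)
	if err != nil {
		return err
	}
	for page.NotDone() {
		for _, vmss := range page.Values() {
			fmt.Println(*vmss.Name)
		}
		if err := page.Next(); err != nil {
			return err
		}
	}
	return nil
}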
+func (client VirtualMachineScaleSetsClient) listNextResults(lastResults VirtualMachineScaleSetListResult) (result VirtualMachineScaleSetListResult, err error) {
+	req, err := lastResults.virtualMachineScaleSetListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "listNextResults", nil, "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "listNextResults", resp, "Failure sending next results request")
+	}
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "listNextResults", resp, "Failure responding to next results request")
+	}
+	return
+}
+
+// ListComplete enumerates all values, automatically crossing page boundaries as required.
+func (client VirtualMachineScaleSetsClient) ListComplete(ctx context.Context, resourceGroupName string) (result VirtualMachineScaleSetListResultIterator, err error) {
+	result.page, err = client.List(ctx, resourceGroupName)
+	return
+}
+
+// ListAll gets a list of all VM Scale Sets in the subscription, regardless of the associated resource group. Use the
+// nextLink property in the response to get the next page of VM Scale Sets. Repeat until nextLink is null to fetch all
+// the VM Scale Sets.
+func (client VirtualMachineScaleSetsClient) ListAll(ctx context.Context) (result VirtualMachineScaleSetListWithLinkResultPage, err error) {
+	result.fn = client.listAllNextResults
+	req, err := client.ListAllPreparer(ctx)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ListAll", nil, "Failure preparing request")
+		return
+	}
+
+	resp, err := client.ListAllSender(req)
+	if err != nil {
+		result.vmsslwlr.Response = autorest.Response{Response: resp}
+		err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ListAll", resp, "Failure sending request")
+		return
+	}
+
+	result.vmsslwlr, err = client.ListAllResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ListAll", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// ListAllPreparer prepares the ListAll request.
+func (client VirtualMachineScaleSetsClient) ListAllPreparer(ctx context.Context) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"subscriptionId": autorest.Encode("path", client.SubscriptionID),
+	}
+
+	const APIVersion = "2017-12-01"
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/virtualMachineScaleSets", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListAllSender sends the ListAll request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualMachineScaleSetsClient) ListAllSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req,
+		azure.DoRetryWithRegistration(client.Client))
+}
+
+// ListAllResponder handles the response to the ListAll request. The method always
+// closes the http.Response Body.
+func (client VirtualMachineScaleSetsClient) ListAllResponder(resp *http.Response) (result VirtualMachineScaleSetListWithLinkResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// listAllNextResults retrieves the next set of results, if any.
+func (client VirtualMachineScaleSetsClient) listAllNextResults(lastResults VirtualMachineScaleSetListWithLinkResult) (result VirtualMachineScaleSetListWithLinkResult, err error) {
+	req, err := lastResults.virtualMachineScaleSetListWithLinkResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "listAllNextResults", nil, "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+	resp, err := client.ListAllSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "listAllNextResults", resp, "Failure sending next results request")
+	}
+	result, err = client.ListAllResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "listAllNextResults", resp, "Failure responding to next results request")
+	}
+	return
+}
+
+// ListAllComplete enumerates all values, automatically crossing page boundaries as required.
+func (client VirtualMachineScaleSetsClient) ListAllComplete(ctx context.Context) (result VirtualMachineScaleSetListWithLinkResultIterator, err error) {
+	result.page, err = client.ListAll(ctx)
+	return
+}
+
+// ListSkus gets a list of SKUs available for your VM scale set, including the minimum and maximum VM instances allowed
+// for each SKU.
+//
+// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set.
+func (client VirtualMachineScaleSetsClient) ListSkus(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result VirtualMachineScaleSetListSkusResultPage, err error) {
+	result.fn = client.listSkusNextResults
+	req, err := client.ListSkusPreparer(ctx, resourceGroupName, VMScaleSetName)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ListSkus", nil, "Failure preparing request")
+		return
+	}
+
+	resp, err := client.ListSkusSender(req)
+	if err != nil {
+		result.vmsslsr.Response = autorest.Response{Response: resp}
+		err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ListSkus", resp, "Failure sending request")
+		return
+	}
+
+	result.vmsslsr, err = client.ListSkusResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ListSkus", resp, "Failure responding to request")
+	}
+
+	return
+}
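The Complete variants wrap the same paging in an element-wise iterator. A sketch using ListAllComplete follows, assuming the NotDone/Value/Next methods the generator emits on Iterator types (again defined in the models file, not shown in this hunk).

package vmssexamples

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
)

// countScaleSets visits every scale set in the subscription one element at a
// time; the iterator advances pages internally via listAllNextResults.
func countScaleSets(ctx context.Context, client compute.VirtualMachineScaleSetsClient) (int, error) {
	iter, err := client.ListAllComplete(ctx)
	if err != nil {
		return 0, err
	}
	n := 0
	for iter.NotDone() {
		if name := iter.Value().Name; name != nil {
			fmt.Println(*name)
		}
		n++
		if err := iter.Next(); err != nil {
			return n, err
		}
	}
	return n, nil
}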
+// ListSkusPreparer prepares the ListSkus request.
+func (client VirtualMachineScaleSetsClient) ListSkusPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+		"vmScaleSetName":    autorest.Encode("path", VMScaleSetName),
+	}
+
+	const APIVersion = "2017-12-01"
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/skus", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSkusSender sends the ListSkus request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualMachineScaleSetsClient) ListSkusSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req,
+		azure.DoRetryWithRegistration(client.Client))
+}
+
+// ListSkusResponder handles the response to the ListSkus request. The method always
+// closes the http.Response Body.
+func (client VirtualMachineScaleSetsClient) ListSkusResponder(resp *http.Response) (result VirtualMachineScaleSetListSkusResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// listSkusNextResults retrieves the next set of results, if any.
+func (client VirtualMachineScaleSetsClient) listSkusNextResults(lastResults VirtualMachineScaleSetListSkusResult) (result VirtualMachineScaleSetListSkusResult, err error) {
+	req, err := lastResults.virtualMachineScaleSetListSkusResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "listSkusNextResults", nil, "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+	resp, err := client.ListSkusSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "listSkusNextResults", resp, "Failure sending next results request")
+	}
+	result, err = client.ListSkusResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "listSkusNextResults", resp, "Failure responding to next results request")
+	}
+	return
+}
+
+// ListSkusComplete enumerates all values, automatically crossing page boundaries as required.
+func (client VirtualMachineScaleSetsClient) ListSkusComplete(ctx context.Context, resourceGroupName string, VMScaleSetName string) (result VirtualMachineScaleSetListSkusResultIterator, err error) {
+	result.page, err = client.ListSkus(ctx, resourceGroupName, VMScaleSetName)
+	return
+}
+
+// PowerOff powers off (stops) one or more virtual machines in a VM scale set. Note that resources are still attached
+// and you are charged for the resources. Instead, use Deallocate to release resources and avoid charges.
+//
+// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set. VMInstanceIDs
+// is a list of virtual machine instance IDs from the VM scale set.
+func (client VirtualMachineScaleSetsClient) PowerOff(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs) (result VirtualMachineScaleSetsPowerOffFuture, err error) {
+	req, err := client.PowerOffPreparer(ctx, resourceGroupName, VMScaleSetName, VMInstanceIDs)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "PowerOff", nil, "Failure preparing request")
+		return
+	}
+
+	result, err = client.PowerOffSender(req)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "PowerOff", result.Response(), "Failure sending request")
+		return
+	}
+
+	return
+}
+
+// PowerOffPreparer prepares the PowerOff request.
+func (client VirtualMachineScaleSetsClient) PowerOffPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+		"vmScaleSetName":    autorest.Encode("path", VMScaleSetName),
+	}
+
+	const APIVersion = "2017-12-01"
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsJSON(),
+		autorest.AsPost(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/poweroff", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	if VMInstanceIDs != nil {
+		preparer = autorest.DecoratePreparer(preparer,
+			autorest.WithJSON(VMInstanceIDs))
+	}
+	return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// PowerOffSender sends the PowerOff request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualMachineScaleSetsClient) PowerOffSender(req *http.Request) (future VirtualMachineScaleSetsPowerOffFuture, err error) {
+	sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
+	future.Future = azure.NewFuture(req)
+	future.req = req
+	_, err = future.Done(sender)
+	if err != nil {
+		return
+	}
+	err = autorest.Respond(future.Response(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
+	return
+}
+
+// PowerOffResponder handles the response to the PowerOff request. The method always
+// closes the http.Response Body.
+func (client VirtualMachineScaleSetsClient) PowerOffResponder(resp *http.Response) (result OperationStatusResponse, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
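PowerOff and the other long-running operations return a typed future instead of a response; the Sender above calls future.Done once and returns immediately. The sketch below drives the future to completion by polling Done, the only completion API visible in this hunk; a nil VMInstanceIDs body targets every instance, since PowerOffPreparer only attaches the JSON body when the pointer is non-nil. The polling interval is an arbitrary choice.

package vmssexamples

import (
	"context"
	"time"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
)

// stopScaleSet powers off every instance (a nil body targets the whole set)
// and polls the returned future. Done is the same call PowerOffSender makes
// once before returning; the client itself satisfies autorest.Sender.
func stopScaleSet(ctx context.Context, client compute.VirtualMachineScaleSetsClient, rg, name string) error {
	future, err := client.PowerOff(ctx, rg, name, nil)
	if err != nil {
		return err
	}
	for {
		done, err := future.Done(client)
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(15 * time.Second):
		}
	}
}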
+// Reimage reimages (upgrades the operating system of) one or more virtual machines in a VM scale set.
+//
+// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set. VMInstanceIDs
+// is a list of virtual machine instance IDs from the VM scale set.
+func (client VirtualMachineScaleSetsClient) Reimage(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs) (result VirtualMachineScaleSetsReimageFuture, err error) {
+	req, err := client.ReimagePreparer(ctx, resourceGroupName, VMScaleSetName, VMInstanceIDs)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Reimage", nil, "Failure preparing request")
+		return
+	}
+
+	result, err = client.ReimageSender(req)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Reimage", result.Response(), "Failure sending request")
+		return
+	}
+
+	return
+}
+
+// ReimagePreparer prepares the Reimage request.
+func (client VirtualMachineScaleSetsClient) ReimagePreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+		"vmScaleSetName":    autorest.Encode("path", VMScaleSetName),
+	}
+
+	const APIVersion = "2017-12-01"
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsJSON(),
+		autorest.AsPost(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/reimage", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	if VMInstanceIDs != nil {
+		preparer = autorest.DecoratePreparer(preparer,
+			autorest.WithJSON(VMInstanceIDs))
+	}
+	return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ReimageSender sends the Reimage request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualMachineScaleSetsClient) ReimageSender(req *http.Request) (future VirtualMachineScaleSetsReimageFuture, err error) {
+	sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
+	future.Future = azure.NewFuture(req)
+	future.req = req
+	_, err = future.Done(sender)
+	if err != nil {
+		return
+	}
+	err = autorest.Respond(future.Response(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
+	return
+}
+
+// ReimageResponder handles the response to the Reimage request. The method always
+// closes the http.Response Body.
+func (client VirtualMachineScaleSetsClient) ReimageResponder(resp *http.Response) (result OperationStatusResponse, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ReimageAll reimages all the disks (including data disks) in the virtual machines in a VM scale set. This operation
+// is only supported for managed disks.
+//
+// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set. VMInstanceIDs
+// is a list of virtual machine instance IDs from the VM scale set.
+func (client VirtualMachineScaleSetsClient) ReimageAll(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs) (result VirtualMachineScaleSetsReimageAllFuture, err error) { + req, err := client.ReimageAllPreparer(ctx, resourceGroupName, VMScaleSetName, VMInstanceIDs) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ReimageAll", nil, "Failure preparing request") + return + } + + result, err = client.ReimageAllSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "ReimageAll", result.Response(), "Failure sending request") + return + } + + return +} + +// ReimageAllPreparer prepares the ReimageAll request. +func (client VirtualMachineScaleSetsClient) ReimageAllPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/reimageall", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if VMInstanceIDs != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithJSON(VMInstanceIDs)) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ReimageAllSender sends the ReimageAll request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetsClient) ReimageAllSender(req *http.Request) (future VirtualMachineScaleSetsReimageAllFuture, err error) { + sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client)) + future.Future = azure.NewFuture(req) + future.req = req + _, err = future.Done(sender) + if err != nil { + return + } + err = autorest.Respond(future.Response(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) + return +} + +// ReimageAllResponder handles the response to the ReimageAll request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetsClient) ReimageAllResponder(resp *http.Response) (result OperationStatusResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Restart restarts one or more virtual machines in a VM scale set. +// +// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set. VMInstanceIDs +// is a list of virtual machine instance IDs from the VM scale set. 
+func (client VirtualMachineScaleSetsClient) Restart(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs) (result VirtualMachineScaleSetsRestartFuture, err error) { + req, err := client.RestartPreparer(ctx, resourceGroupName, VMScaleSetName, VMInstanceIDs) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Restart", nil, "Failure preparing request") + return + } + + result, err = client.RestartSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Restart", result.Response(), "Failure sending request") + return + } + + return +} + +// RestartPreparer prepares the Restart request. +func (client VirtualMachineScaleSetsClient) RestartPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/restart", pathParameters), + autorest.WithQueryParameters(queryParameters)) + if VMInstanceIDs != nil { + preparer = autorest.DecoratePreparer(preparer, + autorest.WithJSON(VMInstanceIDs)) + } + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// RestartSender sends the Restart request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetsClient) RestartSender(req *http.Request) (future VirtualMachineScaleSetsRestartFuture, err error) { + sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client)) + future.Future = azure.NewFuture(req) + future.req = req + _, err = future.Done(sender) + if err != nil { + return + } + err = autorest.Respond(future.Response(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) + return +} + +// RestartResponder handles the response to the Restart request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetsClient) RestartResponder(resp *http.Response) (result OperationStatusResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Start starts one or more virtual machines in a VM scale set. +// +// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set. VMInstanceIDs +// is a list of virtual machine instance IDs from the VM scale set. 
+func (client VirtualMachineScaleSetsClient) Start(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs) (result VirtualMachineScaleSetsStartFuture, err error) {
+	req, err := client.StartPreparer(ctx, resourceGroupName, VMScaleSetName, VMInstanceIDs)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Start", nil, "Failure preparing request")
+		return
+	}
+
+	result, err = client.StartSender(req)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Start", result.Response(), "Failure sending request")
+		return
+	}
+
+	return
+}
+
+// StartPreparer prepares the Start request.
+func (client VirtualMachineScaleSetsClient) StartPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs *VirtualMachineScaleSetVMInstanceIDs) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+		"vmScaleSetName":    autorest.Encode("path", VMScaleSetName),
+	}
+
+	const APIVersion = "2017-12-01"
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsJSON(),
+		autorest.AsPost(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/start", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	if VMInstanceIDs != nil {
+		preparer = autorest.DecoratePreparer(preparer,
+			autorest.WithJSON(VMInstanceIDs))
+	}
+	return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// StartSender sends the Start request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualMachineScaleSetsClient) StartSender(req *http.Request) (future VirtualMachineScaleSetsStartFuture, err error) {
+	sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
+	future.Future = azure.NewFuture(req)
+	future.req = req
+	_, err = future.Done(sender)
+	if err != nil {
+		return
+	}
+	err = autorest.Respond(future.Response(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
+	return
+}
+
+// StartResponder handles the response to the Start request. The method always
+// closes the http.Response Body.
+func (client VirtualMachineScaleSetsClient) StartResponder(resp *http.Response) (result OperationStatusResponse, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// Update updates a VM scale set.
+//
+// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set to create or
+// update. parameters is the scale set object.
+func (client VirtualMachineScaleSetsClient) Update(ctx context.Context, resourceGroupName string, VMScaleSetName string, parameters VirtualMachineScaleSetUpdate) (result VirtualMachineScaleSetsUpdateFuture, err error) { + req, err := client.UpdatePreparer(ctx, resourceGroupName, VMScaleSetName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Update", nil, "Failure preparing request") + return + } + + result, err = client.UpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "Update", result.Response(), "Failure sending request") + return + } + + return +} + +// UpdatePreparer prepares the Update request. +func (client VirtualMachineScaleSetsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, parameters VirtualMachineScaleSetUpdate) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetsClient) UpdateSender(req *http.Request) (future VirtualMachineScaleSetsUpdateFuture, err error) { + sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client)) + future.Future = azure.NewFuture(req) + future.req = req + _, err = future.Done(sender) + if err != nil { + return + } + err = autorest.Respond(future.Response(), + azure.WithErrorUnlessStatusCode(http.StatusOK)) + return +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetsClient) UpdateResponder(resp *http.Response) (result VirtualMachineScaleSet, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// UpdateInstances upgrades one or more virtual machines to the latest SKU set in the VM scale set model. +// +// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set. VMInstanceIDs +// is a list of virtual machine instance IDs from the VM scale set. 
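Update issues a PATCH (see autorest.AsPatch in UpdatePreparer above), so a sparse VirtualMachineScaleSetUpdate only touches the fields it sets. A sketch that scales capacity via the Sku follows; the Sku shape is assumed from the package's models file, which is outside this hunk, and the caller polls the returned future as in the PowerOff sketch earlier.

package vmssexamples

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
	"github.com/Azure/go-autorest/autorest/to"
)

// scaleTo sends a PATCH that touches only the SKU capacity; fields left out
// of a VirtualMachineScaleSetUpdate keep their current values.
func scaleTo(ctx context.Context, client compute.VirtualMachineScaleSetsClient, rg, name string, capacity int64) (compute.VirtualMachineScaleSetsUpdateFuture, error) {
	update := compute.VirtualMachineScaleSetUpdate{
		// Sku and its Capacity field are assumed from the models file.
		Sku: &compute.Sku{Capacity: to.Int64Ptr(capacity)},
	}
	return client.Update(ctx, rg, name, update)
}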
+func (client VirtualMachineScaleSetsClient) UpdateInstances(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs VirtualMachineScaleSetVMInstanceRequiredIDs) (result VirtualMachineScaleSetsUpdateInstancesFuture, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: VMInstanceIDs, + Constraints: []validation.Constraint{{Target: "VMInstanceIDs.InstanceIds", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "compute.VirtualMachineScaleSetsClient", "UpdateInstances") + } + + req, err := client.UpdateInstancesPreparer(ctx, resourceGroupName, VMScaleSetName, VMInstanceIDs) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "UpdateInstances", nil, "Failure preparing request") + return + } + + result, err = client.UpdateInstancesSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetsClient", "UpdateInstances", result.Response(), "Failure sending request") + return + } + + return +} + +// UpdateInstancesPreparer prepares the UpdateInstances request. +func (client VirtualMachineScaleSetsClient) UpdateInstancesPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, VMInstanceIDs VirtualMachineScaleSetVMInstanceRequiredIDs) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/manualupgrade", pathParameters), + autorest.WithJSON(VMInstanceIDs), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateInstancesSender sends the UpdateInstances request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetsClient) UpdateInstancesSender(req *http.Request) (future VirtualMachineScaleSetsUpdateInstancesFuture, err error) { + sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client)) + future.Future = azure.NewFuture(req) + future.req = req + _, err = future.Done(sender) + if err != nil { + return + } + err = autorest.Respond(future.Response(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) + return +} + +// UpdateInstancesResponder handles the response to the UpdateInstances request. The method always +// closes the http.Response Body. 
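Unlike the optional *VirtualMachineScaleSetVMInstanceIDs taken by PowerOff and friends, UpdateInstances validates up front that InstanceIds is non-nil, failing before any request is prepared. A sketch follows; the field's *[]string shape is assumed from the models file.

package vmssexamples

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
	"github.com/Azure/go-autorest/autorest/to"
)

// upgradeInstances rolls the named instances onto the latest scale-set model.
// The validation block in UpdateInstances above rejects a nil InstanceIds
// before any HTTP request is made. Poll the returned future as in the
// PowerOff sketch.
func upgradeInstances(ctx context.Context, client compute.VirtualMachineScaleSetsClient, rg, name string, ids []string) (compute.VirtualMachineScaleSetsUpdateInstancesFuture, error) {
	body := compute.VirtualMachineScaleSetVMInstanceRequiredIDs{
		InstanceIds: to.StringSlicePtr(ids),
	}
	return client.UpdateInstances(ctx, rg, name, body)
}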
+func (client VirtualMachineScaleSetsClient) UpdateInstancesResponder(resp *http.Response) (result OperationStatusResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachinescalesetvms.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachinescalesetvms.go new file mode 100644 index 000000000000..9a55b534b5a8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachinescalesetvms.go @@ -0,0 +1,870 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "net/http" +) + +// VirtualMachineScaleSetVMsClient is the compute Client +type VirtualMachineScaleSetVMsClient struct { + BaseClient +} + +// NewVirtualMachineScaleSetVMsClient creates an instance of the VirtualMachineScaleSetVMsClient client. +func NewVirtualMachineScaleSetVMsClient(subscriptionID string) VirtualMachineScaleSetVMsClient { + return NewVirtualMachineScaleSetVMsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewVirtualMachineScaleSetVMsClientWithBaseURI creates an instance of the VirtualMachineScaleSetVMsClient client. +func NewVirtualMachineScaleSetVMsClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineScaleSetVMsClient { + return VirtualMachineScaleSetVMsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Deallocate deallocates a specific virtual machine in a VM scale set. Shuts down the virtual machine and releases the +// compute resources it uses. You are not billed for the compute resources of this virtual machine once it is +// deallocated. +// +// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set. instanceID is +// the instance ID of the virtual machine. 
+func (client VirtualMachineScaleSetVMsClient) Deallocate(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result VirtualMachineScaleSetVMsDeallocateFuture, err error) { + req, err := client.DeallocatePreparer(ctx, resourceGroupName, VMScaleSetName, instanceID) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Deallocate", nil, "Failure preparing request") + return + } + + result, err = client.DeallocateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Deallocate", result.Response(), "Failure sending request") + return + } + + return +} + +// DeallocatePreparer prepares the Deallocate request. +func (client VirtualMachineScaleSetVMsClient) DeallocatePreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "instanceId": autorest.Encode("path", instanceID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/deallocate", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeallocateSender sends the Deallocate request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetVMsClient) DeallocateSender(req *http.Request) (future VirtualMachineScaleSetVMsDeallocateFuture, err error) { + sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client)) + future.Future = azure.NewFuture(req) + future.req = req + _, err = future.Done(sender) + if err != nil { + return + } + err = autorest.Respond(future.Response(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) + return +} + +// DeallocateResponder handles the response to the Deallocate request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetVMsClient) DeallocateResponder(resp *http.Response) (result OperationStatusResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a virtual machine from a VM scale set. +// +// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set. instanceID is +// the instance ID of the virtual machine. 
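Per-instance operations live on VirtualMachineScaleSetVMsClient and address a VM by its numeric instance ID rather than by name. A sketch that deallocates one instance and polls its future, mirroring the earlier PowerOff sketch:

package vmssexamples

import (
	"context"
	"time"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
)

// deallocateInstance releases the compute resources of a single instance so
// it stops accruing charges (contrast PowerOff, which leaves them attached).
// Instance IDs are the numeric strings Azure assigns ("0", "1", ...).
func deallocateInstance(ctx context.Context, client compute.VirtualMachineScaleSetVMsClient, rg, vmssName, instanceID string) error {
	future, err := client.Deallocate(ctx, rg, vmssName, instanceID)
	if err != nil {
		return err
	}
	// Same Done-polling as the scale-set-level sketch; the interval is an
	// arbitrary choice.
	for {
		done, err := future.Done(client)
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(15 * time.Second):
		}
	}
}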
+func (client VirtualMachineScaleSetVMsClient) Delete(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result VirtualMachineScaleSetVMsDeleteFuture, err error) { + req, err := client.DeletePreparer(ctx, resourceGroupName, VMScaleSetName, instanceID) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client VirtualMachineScaleSetVMsClient) DeletePreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "instanceId": autorest.Encode("path", instanceID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetVMsClient) DeleteSender(req *http.Request) (future VirtualMachineScaleSetVMsDeleteFuture, err error) { + sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client)) + future.Future = azure.NewFuture(req) + future.req = req + _, err = future.Done(sender) + if err != nil { + return + } + err = autorest.Respond(future.Response(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent)) + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetVMsClient) DeleteResponder(resp *http.Response) (result OperationStatusResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Get gets a virtual machine from a VM scale set. +// +// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set. instanceID is +// the instance ID of the virtual machine. 
+func (client VirtualMachineScaleSetVMsClient) Get(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result VirtualMachineScaleSetVM, err error) { + req, err := client.GetPreparer(ctx, resourceGroupName, VMScaleSetName, instanceID) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client VirtualMachineScaleSetVMsClient) GetPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "instanceId": autorest.Encode("path", instanceID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetVMsClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetVMsClient) GetResponder(resp *http.Response) (result VirtualMachineScaleSetVM, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetInstanceView gets the status of a virtual machine from a VM scale set. +// +// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set. instanceID is +// the instance ID of the virtual machine. 
+func (client VirtualMachineScaleSetVMsClient) GetInstanceView(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result VirtualMachineScaleSetVMInstanceView, err error) {
+	req, err := client.GetInstanceViewPreparer(ctx, resourceGroupName, VMScaleSetName, instanceID)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "GetInstanceView", nil, "Failure preparing request")
+		return
+	}
+
+	resp, err := client.GetInstanceViewSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "GetInstanceView", resp, "Failure sending request")
+		return
+	}
+
+	result, err = client.GetInstanceViewResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "GetInstanceView", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// GetInstanceViewPreparer prepares the GetInstanceView request.
+func (client VirtualMachineScaleSetVMsClient) GetInstanceViewPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"instanceId":        autorest.Encode("path", instanceID),
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+		"vmScaleSetName":    autorest.Encode("path", VMScaleSetName),
+	}
+
+	const APIVersion = "2017-12-01"
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/instanceView", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetInstanceViewSender sends the GetInstanceView request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualMachineScaleSetVMsClient) GetInstanceViewSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req,
+		azure.DoRetryWithRegistration(client.Client))
+}
+
+// GetInstanceViewResponder handles the response to the GetInstanceView request. The method always
+// closes the http.Response Body.
+func (client VirtualMachineScaleSetVMsClient) GetInstanceViewResponder(resp *http.Response) (result VirtualMachineScaleSetVMInstanceView, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// List gets a list of all virtual machines in a VM scale set.
+//
+// resourceGroupName is the name of the resource group. virtualMachineScaleSetName is the name of the VM scale set.
+// filter is the filter to apply to the operation. selectParameter is the $select expression to apply to the
+// operation. expand is the expand expression to apply to the operation.
+func (client VirtualMachineScaleSetVMsClient) List(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, filter string, selectParameter string, expand string) (result VirtualMachineScaleSetVMListResultPage, err error) { + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, resourceGroupName, virtualMachineScaleSetName, filter, selectParameter, expand) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.vmssvlr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "List", resp, "Failure sending request") + return + } + + result.vmssvlr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client VirtualMachineScaleSetVMsClient) ListPreparer(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, filter string, selectParameter string, expand string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualMachineScaleSetName": autorest.Encode("path", virtualMachineScaleSetName), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if len(selectParameter) > 0 { + queryParameters["$select"] = autorest.Encode("query", selectParameter) + } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetVMsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetVMsClient) ListResponder(resp *http.Response) (result VirtualMachineScaleSetVMListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. 
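The VMs-level List forwards non-empty filter, selectParameter, and expand arguments as the $filter, $select, and $expand query parameters (see ListPreparer above); empty strings simply omit them. A sketch listing instances that have not yet picked up the latest scale-set model; the filter expression is illustrative, and the page methods are assumed from the models file as before.

package vmssexamples

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute"
)

// listStaleInstances prints the IDs of instances still on an older model.
// Empty select/expand strings leave those query parameters out entirely.
func listStaleInstances(ctx context.Context, client compute.VirtualMachineScaleSetVMsClient, rg, vmssName string) error {
	page, err := client.List(ctx, rg, vmssName, "properties/latestModelApplied eq false", "", "")
	if err != nil {
		return err
	}
	for page.NotDone() {
		for _, vm := range page.Values() {
			fmt.Println(*vm.InstanceID)
		}
		if err := page.Next(); err != nil {
			return err
		}
	}
	return nil
}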
+func (client VirtualMachineScaleSetVMsClient) listNextResults(lastResults VirtualMachineScaleSetVMListResult) (result VirtualMachineScaleSetVMListResult, err error) {
+    req, err := lastResults.virtualMachineScaleSetVMListResultPreparer()
+    if err != nil {
+        return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "listNextResults", nil, "Failure preparing next results request")
+    }
+    if req == nil {
+        return
+    }
+    resp, err := client.ListSender(req)
+    if err != nil {
+        result.Response = autorest.Response{Response: resp}
+        return result, autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "listNextResults", resp, "Failure sending next results request")
+    }
+    result, err = client.ListResponder(resp)
+    if err != nil {
+        err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "listNextResults", resp, "Failure responding to next results request")
+    }
+    return
+}
+
+// ListComplete enumerates all values, automatically crossing page boundaries as required.
+func (client VirtualMachineScaleSetVMsClient) ListComplete(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, filter string, selectParameter string, expand string) (result VirtualMachineScaleSetVMListResultIterator, err error) {
+    result.page, err = client.List(ctx, resourceGroupName, virtualMachineScaleSetName, filter, selectParameter, expand)
+    return
+}
+
+// PowerOff powers off (stops) a virtual machine in a VM scale set. Note that resources are still attached and you
+// are charged for the resources. Instead, use deallocate to release resources and avoid charges.
+//
+// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set. instanceID is
+// the instance ID of the virtual machine.
+func (client VirtualMachineScaleSetVMsClient) PowerOff(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result VirtualMachineScaleSetVMsPowerOffFuture, err error) {
+    req, err := client.PowerOffPreparer(ctx, resourceGroupName, VMScaleSetName, instanceID)
+    if err != nil {
+        err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "PowerOff", nil, "Failure preparing request")
+        return
+    }
+
+    result, err = client.PowerOffSender(req)
+    if err != nil {
+        err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "PowerOff", result.Response(), "Failure sending request")
+        return
+    }
+
+    return
+}
+
+// PowerOffPreparer prepares the PowerOff request.
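+// As with every operation in this generated client, the work is split three
+// ways: the Preparer builds the *http.Request from path and query parameters,
+// the Sender executes it (decorated with retry-with-registration behavior),
+// and the Responder validates the status code and unmarshals the JSON body.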
+func (client VirtualMachineScaleSetVMsClient) PowerOffPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (*http.Request, error) {
+    pathParameters := map[string]interface{}{
+        "instanceId":        autorest.Encode("path", instanceID),
+        "resourceGroupName": autorest.Encode("path", resourceGroupName),
+        "subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+        "vmScaleSetName":    autorest.Encode("path", VMScaleSetName),
+    }
+
+    const APIVersion = "2017-12-01"
+    queryParameters := map[string]interface{}{
+        "api-version": APIVersion,
+    }
+
+    preparer := autorest.CreatePreparer(
+        autorest.AsPost(),
+        autorest.WithBaseURL(client.BaseURI),
+        autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/poweroff", pathParameters),
+        autorest.WithQueryParameters(queryParameters))
+    return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// PowerOffSender sends the PowerOff request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualMachineScaleSetVMsClient) PowerOffSender(req *http.Request) (future VirtualMachineScaleSetVMsPowerOffFuture, err error) {
+    sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
+    future.Future = azure.NewFuture(req)
+    future.req = req
+    _, err = future.Done(sender)
+    if err != nil {
+        return
+    }
+    err = autorest.Respond(future.Response(),
+        azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
+    return
+}
+
+// PowerOffResponder handles the response to the PowerOff request. The method always
+// closes the http.Response Body.
+func (client VirtualMachineScaleSetVMsClient) PowerOffResponder(resp *http.Response) (result OperationStatusResponse, err error) {
+    err = autorest.Respond(
+        resp,
+        client.ByInspecting(),
+        azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+        autorest.ByUnmarshallingJSON(&result),
+        autorest.ByClosing())
+    result.Response = autorest.Response{Response: resp}
+    return
+}
+
+// Reimage reimages (upgrades the operating system of) a specific virtual machine in a VM scale set.
+//
+// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set. instanceID is
+// the instance ID of the virtual machine.
+func (client VirtualMachineScaleSetVMsClient) Reimage(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result VirtualMachineScaleSetVMsReimageFuture, err error) {
+    req, err := client.ReimagePreparer(ctx, resourceGroupName, VMScaleSetName, instanceID)
+    if err != nil {
+        err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Reimage", nil, "Failure preparing request")
+        return
+    }
+
+    result, err = client.ReimageSender(req)
+    if err != nil {
+        err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Reimage", result.Response(), "Failure sending request")
+        return
+    }
+
+    return
+}
+
+// ReimagePreparer prepares the Reimage request.
+func (client VirtualMachineScaleSetVMsClient) ReimagePreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (*http.Request, error) {
+    pathParameters := map[string]interface{}{
+        "instanceId":        autorest.Encode("path", instanceID),
+        "resourceGroupName": autorest.Encode("path", resourceGroupName),
+        "subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+        "vmScaleSetName":    autorest.Encode("path", VMScaleSetName),
+    }
+
+    const APIVersion = "2017-12-01"
+    queryParameters := map[string]interface{}{
+        "api-version": APIVersion,
+    }
+
+    preparer := autorest.CreatePreparer(
+        autorest.AsPost(),
+        autorest.WithBaseURL(client.BaseURI),
+        autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/reimage", pathParameters),
+        autorest.WithQueryParameters(queryParameters))
+    return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ReimageSender sends the Reimage request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualMachineScaleSetVMsClient) ReimageSender(req *http.Request) (future VirtualMachineScaleSetVMsReimageFuture, err error) {
+    sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
+    future.Future = azure.NewFuture(req)
+    future.req = req
+    _, err = future.Done(sender)
+    if err != nil {
+        return
+    }
+    err = autorest.Respond(future.Response(),
+        azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
+    return
+}
+
+// ReimageResponder handles the response to the Reimage request. The method always
+// closes the http.Response Body.
+func (client VirtualMachineScaleSetVMsClient) ReimageResponder(resp *http.Response) (result OperationStatusResponse, err error) {
+    err = autorest.Respond(
+        resp,
+        client.ByInspecting(),
+        azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+        autorest.ByUnmarshallingJSON(&result),
+        autorest.ByClosing())
+    result.Response = autorest.Response{Response: resp}
+    return
+}
+
+// ReimageAll allows you to re-image all the disks (including data disks) in a VM scale set instance. This
+// operation is only supported for managed disks.
+//
+// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set. instanceID is
+// the instance ID of the virtual machine.
+func (client VirtualMachineScaleSetVMsClient) ReimageAll(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result VirtualMachineScaleSetVMsReimageAllFuture, err error) {
+    req, err := client.ReimageAllPreparer(ctx, resourceGroupName, VMScaleSetName, instanceID)
+    if err != nil {
+        err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "ReimageAll", nil, "Failure preparing request")
+        return
+    }
+
+    result, err = client.ReimageAllSender(req)
+    if err != nil {
+        err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "ReimageAll", result.Response(), "Failure sending request")
+        return
+    }
+
+    return
+}
+
+// ReimageAllPreparer prepares the ReimageAll request.
+func (client VirtualMachineScaleSetVMsClient) ReimageAllPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "instanceId": autorest.Encode("path", instanceID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/reimageall", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ReimageAllSender sends the ReimageAll request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetVMsClient) ReimageAllSender(req *http.Request) (future VirtualMachineScaleSetVMsReimageAllFuture, err error) { + sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client)) + future.Future = azure.NewFuture(req) + future.req = req + _, err = future.Done(sender) + if err != nil { + return + } + err = autorest.Respond(future.Response(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) + return +} + +// ReimageAllResponder handles the response to the ReimageAll request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetVMsClient) ReimageAllResponder(resp *http.Response) (result OperationStatusResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Restart restarts a virtual machine in a VM scale set. +// +// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set. instanceID is +// the instance ID of the virtual machine. +func (client VirtualMachineScaleSetVMsClient) Restart(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result VirtualMachineScaleSetVMsRestartFuture, err error) { + req, err := client.RestartPreparer(ctx, resourceGroupName, VMScaleSetName, instanceID) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Restart", nil, "Failure preparing request") + return + } + + result, err = client.RestartSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Restart", result.Response(), "Failure sending request") + return + } + + return +} + +// RestartPreparer prepares the Restart request. 
+func (client VirtualMachineScaleSetVMsClient) RestartPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "instanceId": autorest.Encode("path", instanceID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/restart", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// RestartSender sends the Restart request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetVMsClient) RestartSender(req *http.Request) (future VirtualMachineScaleSetVMsRestartFuture, err error) { + sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client)) + future.Future = azure.NewFuture(req) + future.req = req + _, err = future.Done(sender) + if err != nil { + return + } + err = autorest.Respond(future.Response(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) + return +} + +// RestartResponder handles the response to the Restart request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetVMsClient) RestartResponder(resp *http.Response) (result OperationStatusResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Start starts a virtual machine in a VM scale set. +// +// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set. instanceID is +// the instance ID of the virtual machine. +func (client VirtualMachineScaleSetVMsClient) Start(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (result VirtualMachineScaleSetVMsStartFuture, err error) { + req, err := client.StartPreparer(ctx, resourceGroupName, VMScaleSetName, instanceID) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Start", nil, "Failure preparing request") + return + } + + result, err = client.StartSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Start", result.Response(), "Failure sending request") + return + } + + return +} + +// StartPreparer prepares the Start request. 
+func (client VirtualMachineScaleSetVMsClient) StartPreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string) (*http.Request, error) {
+    pathParameters := map[string]interface{}{
+        "instanceId":        autorest.Encode("path", instanceID),
+        "resourceGroupName": autorest.Encode("path", resourceGroupName),
+        "subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+        "vmScaleSetName":    autorest.Encode("path", VMScaleSetName),
+    }
+
+    const APIVersion = "2017-12-01"
+    queryParameters := map[string]interface{}{
+        "api-version": APIVersion,
+    }
+
+    preparer := autorest.CreatePreparer(
+        autorest.AsPost(),
+        autorest.WithBaseURL(client.BaseURI),
+        autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}/start", pathParameters),
+        autorest.WithQueryParameters(queryParameters))
+    return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// StartSender sends the Start request. The method will close the
+// http.Response Body if it receives an error.
+func (client VirtualMachineScaleSetVMsClient) StartSender(req *http.Request) (future VirtualMachineScaleSetVMsStartFuture, err error) {
+    sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client))
+    future.Future = azure.NewFuture(req)
+    future.req = req
+    _, err = future.Done(sender)
+    if err != nil {
+        return
+    }
+    err = autorest.Respond(future.Response(),
+        azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
+    return
+}
+
+// StartResponder handles the response to the Start request. The method always
+// closes the http.Response Body.
+func (client VirtualMachineScaleSetVMsClient) StartResponder(resp *http.Response) (result OperationStatusResponse, err error) {
+    err = autorest.Respond(
+        resp,
+        client.ByInspecting(),
+        azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
+        autorest.ByUnmarshallingJSON(&result),
+        autorest.ByClosing())
+    result.Response = autorest.Response{Response: resp}
+    return
+}
+
+// Update updates a virtual machine of a VM scale set.
+//
+// resourceGroupName is the name of the resource group. VMScaleSetName is the name of the VM scale set where the
+// extension should be created or updated. instanceID is the instance ID of the virtual machine. parameters is
+// the parameters supplied to the Update Virtual Machine Scale Sets VM operation.
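+//
+// A hedged sketch of driving the returned long-running-operation future to
+// completion (names are hypothetical; the WaitForCompletion helper is assumed
+// from this go-autorest generation):
+//
+//	future, err := client.Update(ctx, "myResourceGroup", "myScaleSet", "0", updatedVM)
+//	if err == nil {
+//		err = future.WaitForCompletion(ctx, client.Client)
+//	}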
+func (client VirtualMachineScaleSetVMsClient) Update(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters VirtualMachineScaleSetVM) (result VirtualMachineScaleSetVMsUpdateFuture, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.VirtualMachineScaleSetVMProperties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.VirtualMachineScaleSetVMProperties.StorageProfile", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.VirtualMachineScaleSetVMProperties.StorageProfile.OsDisk", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.VirtualMachineScaleSetVMProperties.StorageProfile.OsDisk.EncryptionSettings", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.VirtualMachineScaleSetVMProperties.StorageProfile.OsDisk.EncryptionSettings.DiskEncryptionKey", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.VirtualMachineScaleSetVMProperties.StorageProfile.OsDisk.EncryptionSettings.DiskEncryptionKey.SecretURL", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.VirtualMachineScaleSetVMProperties.StorageProfile.OsDisk.EncryptionSettings.DiskEncryptionKey.SourceVault", Name: validation.Null, Rule: true, Chain: nil}, + }}, + {Target: "parameters.VirtualMachineScaleSetVMProperties.StorageProfile.OsDisk.EncryptionSettings.KeyEncryptionKey", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.VirtualMachineScaleSetVMProperties.StorageProfile.OsDisk.EncryptionSettings.KeyEncryptionKey.KeyURL", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.VirtualMachineScaleSetVMProperties.StorageProfile.OsDisk.EncryptionSettings.KeyEncryptionKey.SourceVault", Name: validation.Null, Rule: true, Chain: nil}, + }}, + }}, + }}, + }}, + }}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "compute.VirtualMachineScaleSetVMsClient", "Update") + } + + req, err := client.UpdatePreparer(ctx, resourceGroupName, VMScaleSetName, instanceID, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Update", nil, "Failure preparing request") + return + } + + result, err = client.UpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineScaleSetVMsClient", "Update", result.Response(), "Failure sending request") + return + } + + return +} + +// UpdatePreparer prepares the Update request. 
+func (client VirtualMachineScaleSetVMsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters VirtualMachineScaleSetVM) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "instanceId": autorest.Encode("path", instanceID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "vmScaleSetName": autorest.Encode("path", VMScaleSetName), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}/virtualmachines/{instanceId}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineScaleSetVMsClient) UpdateSender(req *http.Request) (future VirtualMachineScaleSetVMsUpdateFuture, err error) { + sender := autorest.DecorateSender(client, azure.DoRetryWithRegistration(client.Client)) + future.Future = azure.NewFuture(req) + future.req = req + _, err = future.Done(sender) + if err != nil { + return + } + err = autorest.Respond(future.Response(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted)) + return +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. +func (client VirtualMachineScaleSetVMsClient) UpdateResponder(resp *http.Response) (result VirtualMachineScaleSetVM, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachinesizes.go b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachinesizes.go new file mode 100644 index 000000000000..22b8756043be --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2017-12-01/compute/virtualmachinesizes.go @@ -0,0 +1,112 @@ +package compute + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "net/http" +) + +// VirtualMachineSizesClient is the compute Client +type VirtualMachineSizesClient struct { + BaseClient +} + +// NewVirtualMachineSizesClient creates an instance of the VirtualMachineSizesClient client. +func NewVirtualMachineSizesClient(subscriptionID string) VirtualMachineSizesClient { + return NewVirtualMachineSizesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewVirtualMachineSizesClientWithBaseURI creates an instance of the VirtualMachineSizesClient client. +func NewVirtualMachineSizesClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineSizesClient { + return VirtualMachineSizesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// List lists all available virtual machine sizes for a subscription in a location. +// +// location is the location upon which virtual-machine-sizes is queried. +func (client VirtualMachineSizesClient) List(ctx context.Context, location string) (result VirtualMachineSizeListResult, err error) { + if err := validation.Validate([]validation.Validation{ + {TargetValue: location, + Constraints: []validation.Constraint{{Target: "location", Name: validation.Pattern, Rule: `^[-\w\._]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewErrorWithValidationError(err, "compute.VirtualMachineSizesClient", "List") + } + + req, err := client.ListPreparer(ctx, location) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineSizesClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "compute.VirtualMachineSizesClient", "List", resp, "Failure sending request") + return + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachineSizesClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client VirtualMachineSizesClient) ListPreparer(ctx context.Context, location string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "location": autorest.Encode("path", location), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-12-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/vmSizes", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualMachineSizesClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req, + azure.DoRetryWithRegistration(client.Client)) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. 
+func (client VirtualMachineSizesClient) ListResponder(resp *http.Response) (result VirtualMachineSizeListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/README.md b/vendor/github.com/Azure/azure-sdk-for-go/storage/README.md index 6dc348e02af2..85a0482d6ec6 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/README.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/README.md @@ -6,7 +6,7 @@ This package includes support for [Azure Storage Emulator](https://azure.microso # Getting Started - 1. Go get the SDK `go get -u github.com/Azure/azure-sdk-for=go/storage` + 1. Go get the SDK `go get -u github.com/Azure/azure-sdk-for-go/storage` 1. If you don't already have one, [create a Storage Account](https://docs.microsoft.com/en-us/azure/storage/storage-create-storage-account). - Take note of your Azure Storage Account Name and Azure Storage Account Key. They'll both be necessary for using this library. - This option is production ready, but can also be used for development. @@ -70,4 +70,4 @@ ok, err = queue2.Exists() c.Assert(err, chk.IsNil) c.Assert(ok, chk.Equals, true) } -``` \ No newline at end of file +``` diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go index a9d3cfccb685..5047bfbb24b8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go @@ -549,27 +549,7 @@ func (b *Blob) GetMetadata(options *GetBlobMetadataOptions) error { } func (b *Blob) writeMetadata(h http.Header) { - metadata := make(map[string]string) - for k, v := range h { - // Can't trust CanonicalHeaderKey() to munge case - // reliably. "_" is allowed in identifiers: - // https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx - // https://msdn.microsoft.com/library/aa664670(VS.71).aspx - // http://tools.ietf.org/html/rfc7230#section-3.2 - // ...but "_" is considered invalid by - // CanonicalMIMEHeaderKey in - // https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542 - // so k can be "X-Ms-Meta-Lol" or "x-ms-meta-lol_rofl". - k = strings.ToLower(k) - if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) { - continue - } - // metadata["lol"] = content of the last X-Ms-Meta-Lol header - k = k[len(userDefinedMetadataHeaderPrefix):] - metadata[k] = v[len(v)-1] - } - - b.Metadata = BlobMetadata(metadata) + b.Metadata = BlobMetadata(writeMetadata(h)) } // DeleteBlobOptions includes the options for a delete blob operation diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go index 8fe21b0cfd98..e6b9704ee183 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/blobserviceclient.go @@ -15,6 +15,7 @@ package storage // limitations under the License. 
import ( + "encoding/xml" "fmt" "net/http" "net/url" @@ -85,21 +86,53 @@ func (b BlobStorageClient) ListContainers(params ListContainersParameters) (*Con uri := b.client.getEndpoint(blobServiceName, "", q) headers := b.client.getStandardHeaders() - var out ContainerListResponse + type ContainerAlias struct { + bsc *BlobStorageClient + Name string `xml:"Name"` + Properties ContainerProperties `xml:"Properties"` + Metadata BlobMetadata + sasuri url.URL + } + type ContainerListResponseAlias struct { + XMLName xml.Name `xml:"EnumerationResults"` + Xmlns string `xml:"xmlns,attr"` + Prefix string `xml:"Prefix"` + Marker string `xml:"Marker"` + NextMarker string `xml:"NextMarker"` + MaxResults int64 `xml:"MaxResults"` + Containers []ContainerAlias `xml:"Containers>Container"` + } + + var outAlias ContainerListResponseAlias resp, err := b.client.exec(http.MethodGet, uri, headers, nil, b.auth) if err != nil { return nil, err } defer resp.body.Close() - err = xmlUnmarshal(resp.body, &out) + err = xmlUnmarshal(resp.body, &outAlias) if err != nil { return nil, err } - // assign our client to the newly created Container objects - for i := range out.Containers { - out.Containers[i].bsc = &b + out := ContainerListResponse{ + XMLName: outAlias.XMLName, + Xmlns: outAlias.Xmlns, + Prefix: outAlias.Prefix, + Marker: outAlias.Marker, + NextMarker: outAlias.NextMarker, + MaxResults: outAlias.MaxResults, + Containers: make([]Container, len(outAlias.Containers)), + } + for i, cnt := range outAlias.Containers { + out.Containers[i] = Container{ + bsc: &b, + Name: cnt.Name, + Properties: cnt.Properties, + Metadata: map[string]string(cnt.Metadata), + sasuri: cnt.sasuri, + } } + return &out, err } @@ -124,3 +157,26 @@ func (p ListContainersParameters) getParameters() url.Values { return out } + +func writeMetadata(h http.Header) map[string]string { + metadata := make(map[string]string) + for k, v := range h { + // Can't trust CanonicalHeaderKey() to munge case + // reliably. "_" is allowed in identifiers: + // https://msdn.microsoft.com/en-us/library/azure/dd179414.aspx + // https://msdn.microsoft.com/library/aa664670(VS.71).aspx + // http://tools.ietf.org/html/rfc7230#section-3.2 + // ...but "_" is considered invalid by + // CanonicalMIMEHeaderKey in + // https://golang.org/src/net/textproto/reader.go?s=14615:14659#L542 + // so k can be "X-Ms-Meta-Lol" or "x-ms-meta-lol_rofl". 
+ k = strings.ToLower(k) + if len(v) == 0 || !strings.HasPrefix(k, strings.ToLower(userDefinedMetadataHeaderPrefix)) { + continue + } + // metadata["lol"] = content of the last X-Ms-Meta-Lol header + k = k[len(userDefinedMetadataHeaderPrefix):] + metadata[k] = v[len(v)-1] + } + return metadata +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go index 8f6cd95da718..7d502306d81c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go @@ -31,6 +31,7 @@ import ( "net/url" "regexp" "runtime" + "strconv" "strings" "time" @@ -69,6 +70,17 @@ const ( userAgentHeader = "User-Agent" userDefinedMetadataHeaderPrefix = "x-ms-meta-" + + connectionStringAccountName = "accountname" + connectionStringAccountKey = "accountkey" + connectionStringEndpointSuffix = "endpointsuffix" + connectionStringEndpointProtocol = "defaultendpointsprotocol" + + connectionStringBlobEndpoint = "blobendpoint" + connectionStringFileEndpoint = "fileendpoint" + connectionStringQueueEndpoint = "queueendpoint" + connectionStringTableEndpoint = "tableendpoint" + connectionStringSAS = "sharedaccesssignature" ) var ( @@ -204,6 +216,55 @@ func (e UnexpectedStatusCodeError) Got() int { return e.got } +// NewClientFromConnectionString creates a Client from the connection string. +func NewClientFromConnectionString(input string) (Client, error) { + // build a map of connection string key/value pairs + parts := map[string]string{} + for _, pair := range strings.Split(input, ";") { + if pair == "" { + continue + } + + equalDex := strings.IndexByte(pair, '=') + if equalDex <= 0 { + return Client{}, fmt.Errorf("Invalid connection segment %q", pair) + } + + value := strings.TrimSpace(pair[equalDex+1:]) + key := strings.TrimSpace(strings.ToLower(pair[:equalDex])) + parts[key] = value + } + + // TODO: validate parameter sets? + + if parts[connectionStringAccountName] == StorageEmulatorAccountName { + return NewEmulatorClient() + } + + if parts[connectionStringSAS] != "" { + endpoint := "" + if parts[connectionStringBlobEndpoint] != "" { + endpoint = parts[connectionStringBlobEndpoint] + } else if parts[connectionStringFileEndpoint] != "" { + endpoint = parts[connectionStringFileEndpoint] + } else if parts[connectionStringQueueEndpoint] != "" { + endpoint = parts[connectionStringQueueEndpoint] + } else { + endpoint = parts[connectionStringTableEndpoint] + } + + return NewAccountSASClientFromEndpointToken(endpoint, parts[connectionStringSAS]) + } + + useHTTPS := defaultUseHTTPS + if parts[connectionStringEndpointProtocol] != "" { + useHTTPS = parts[connectionStringEndpointProtocol] == "https" + } + + return NewClient(parts[connectionStringAccountName], parts[connectionStringAccountKey], + parts[connectionStringEndpointSuffix], DefaultAPIVersion, useHTTPS) +} + // NewBasicClient constructs a Client with given storage service name and // key. func NewBasicClient(accountName, accountKey string) (Client, error) { @@ -285,6 +346,47 @@ func NewAccountSASClient(account string, token url.Values, env azure.Environment return c } +// NewAccountSASClientFromEndpointToken constructs a client that uses accountSAS authorization +// for its operations using the specified endpoint and SAS token. 
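+//
+// For illustration, with a hypothetical account name and an elided signature:
+//
+//	c, err := storage.NewAccountSASClientFromEndpointToken(
+//		"https://myaccount.blob.core.windows.net",
+//		"sv=2017-04-17&ss=b&srt=sco&sp=rl&sig=...")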
+func NewAccountSASClientFromEndpointToken(endpoint string, sasToken string) (Client, error) { + u, err := url.Parse(endpoint) + if err != nil { + return Client{}, err + } + + token, err := url.ParseQuery(sasToken) + if err != nil { + return Client{}, err + } + + // the host name will look something like this + // - foo.blob.core.windows.net + // "foo" is the account name + // "core.windows.net" is the baseURL + + // find the first dot to get account name + i1 := strings.IndexByte(u.Host, '.') + if i1 < 0 { + return Client{}, fmt.Errorf("failed to find '.' in %s", u.Host) + } + + // now find the second dot to get the base URL + i2 := strings.IndexByte(u.Host[i1+1:], '.') + if i2 < 0 { + return Client{}, fmt.Errorf("failed to find '.' in %s", u.Host[i1+1:]) + } + + c := newSASClient() + c.accountSASToken = token + c.accountName = u.Host[:i1] + c.baseURL = u.Host[i1+i2+2:] + + // Get API version and protocol from token + c.apiVersion = token.Get("sv") + c.useHTTPS = token.Get("spr") == "https" + return c, nil +} + func newSASClient() Client { c := Client{ HTTPClient: http.DefaultClient, @@ -613,12 +715,13 @@ func (c Client) exec(verb, url string, headers map[string]string, body io.Reader return nil, errors.New("azure/storage: error creating request: " + err.Error()) } - // if a body was provided ensure that the content length was set. - // http.NewRequest() will automatically do this for a handful of types - // and for those that it doesn't we will handle here. - if body != nil && req.ContentLength < 1 { - if lr, ok := body.(*io.LimitedReader); ok { - setContentLengthFromLimitedReader(req, lr) + // http.NewRequest() will automatically set req.ContentLength for a handful of types + // otherwise we will handle here. + if req.ContentLength < 1 { + if clstr, ok := headers["Content-Length"]; ok { + if cl, err := strconv.ParseInt(clstr, 10, 64); err == nil { + req.ContentLength = cl + } } } @@ -626,6 +729,13 @@ func (c Client) exec(verb, url string, headers map[string]string, body io.Reader req.Header[k] = append(req.Header[k], v) // Must bypass case munging present in `Add` by using map functions directly. 
See https://github.com/Azure/azure-sdk-for-go/issues/645 } + if c.isAccountSASClient() { + // append the SAS token to the query params + v := req.URL.Query() + v = mergeParams(v, c.accountSASToken) + req.URL.RawQuery = v.Encode() + } + resp, err := c.Sender.Send(&c, req) if err != nil { return nil, err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/container.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/container.go index 8963c7a89b37..38463bb67f0d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/container.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/container.go @@ -301,9 +301,6 @@ func (c *Container) Exists() (bool, error) { uri = newURI.String() } else { - if c.bsc.client.isAccountSASClient() { - q = mergeParams(q, c.bsc.client.accountSASToken) - } uri = c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), q) } headers := c.bsc.client.getStandardHeaders() @@ -489,9 +486,6 @@ func (c *Container) ListBlobs(params ListBlobsParameters) (BlobListResponse, err newURI.RawQuery = q.Encode() uri = newURI.String() } else { - if c.bsc.client.isAccountSASClient() { - q = mergeParams(q, c.bsc.client.accountSASToken) - } uri = c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), q) } @@ -512,6 +506,81 @@ func (c *Container) ListBlobs(params ListBlobsParameters) (BlobListResponse, err return out, err } +// ContainerMetadataOptions includes options for container metadata operations +type ContainerMetadataOptions struct { + Timeout uint + LeaseID string `header:"x-ms-lease-id"` + RequestID string `header:"x-ms-client-request-id"` +} + +// SetMetadata replaces the metadata for the specified container. +// +// Some keys may be converted to Camel-Case before sending. All keys +// are returned in lower case by GetBlobMetadata. HTTP header names +// are case-insensitive so case munging should not matter to other +// applications either. +// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/set-container-metadata +func (c *Container) SetMetadata(options *ContainerMetadataOptions) error { + params := url.Values{ + "comp": {"metadata"}, + "restype": {"container"}, + } + headers := c.bsc.client.getStandardHeaders() + headers = c.bsc.client.addMetadataToHeaders(headers, c.Metadata) + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + + uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params) + + resp, err := c.bsc.client.exec(http.MethodPut, uri, headers, nil, c.bsc.auth) + if err != nil { + return err + } + readAndCloseBody(resp.body) + return checkRespCode(resp.statusCode, []int{http.StatusOK}) +} + +// GetMetadata returns all user-defined metadata for the specified container. +// +// All metadata keys will be returned in lower case. (HTTP header +// names are case-insensitive.) 
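+//
+// A hypothetical round trip for illustration (blobClient is an existing
+// BlobStorageClient):
+//
+//	cnt := blobClient.GetContainerReference("mycontainer")
+//	if err := cnt.GetMetadata(nil); err == nil {
+//		fmt.Println(cnt.Metadata)
+//	}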
+// +// See https://docs.microsoft.com/en-us/rest/api/storageservices/get-container-metadata +func (c *Container) GetMetadata(options *ContainerMetadataOptions) error { + params := url.Values{ + "comp": {"metadata"}, + "restype": {"container"}, + } + headers := c.bsc.client.getStandardHeaders() + + if options != nil { + params = addTimeout(params, options.Timeout) + headers = mergeHeaders(headers, headersFromStruct(*options)) + } + + uri := c.bsc.client.getEndpoint(blobServiceName, c.buildPath(), params) + + resp, err := c.bsc.client.exec(http.MethodGet, uri, headers, nil, c.bsc.auth) + if err != nil { + return err + } + readAndCloseBody(resp.body) + if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil { + return err + } + + c.writeMetadata(resp.headers) + return nil +} + +func (c *Container) writeMetadata(h http.Header) { + c.Metadata = writeMetadata(h) +} + func generateContainerACLpayload(policies []ContainerAccessPolicy) (io.Reader, int, error) { sil := SignedIdentifiers{ SignedIdentifiers: []SignedIdentifier{}, diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/entity.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/entity.go index 9668ea669494..4533d7d5edfc 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/entity.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/entity.go @@ -26,7 +26,7 @@ import ( "strings" "time" - "github.com/satori/uuid" + "github.com/satori/go.uuid" ) // Annotating as secure for gas scanning diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/pageblob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/pageblob.go index c59fd4b50b9d..f07166521696 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/pageblob.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/pageblob.go @@ -87,10 +87,10 @@ func (b *Blob) modifyRange(blobRange BlobRange, bytes io.Reader, options *PutPag return errors.New("the value for rangeEnd must be greater than or equal to rangeStart") } if blobRange.Start%512 != 0 { - return errors.New("the value for rangeStart must be a modulus of 512") + return errors.New("the value for rangeStart must be a multiple of 512") } if blobRange.End%512 != 511 { - return errors.New("the value for rangeEnd must be a modulus of 511") + return errors.New("the value for rangeEnd must be a multiple of 512 - 1") } params := url.Values{"comp": {"page"}} @@ -147,7 +147,7 @@ func (b *Blob) GetPageRanges(options *GetPageRangesOptions) (GetPageRangesRespon params = addTimeout(params, options.Timeout) params = addSnapshot(params, options.Snapshot) if options.PreviousSnapshot != nil { - params.Add("prevsnapshot", timeRfc1123Formatted(*options.PreviousSnapshot)) + params.Add("prevsnapshot", timeRFC3339Formatted(*options.PreviousSnapshot)) } if options.Range != nil { headers["Range"] = options.Range.String() diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/table_batch.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/table_batch.go index 3f882417c65a..d1d75a2eb1a5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/table_batch.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/table_batch.go @@ -26,7 +26,7 @@ import ( "sort" "strings" - "github.com/satori/uuid" + "github.com/marstr/guid" ) // Operation type. Insert, Delete, Replace etc. @@ -131,14 +131,26 @@ func (t *TableBatch) MergeEntity(entity *Entity) { // the changesets. 
// As per document https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/performing-entity-group-transactions func (t *TableBatch) ExecuteBatch() error { - changesetBoundary := fmt.Sprintf("changeset_%s", uuid.NewV1()) + + // Using `github.com/marstr/guid` is in response to issue #947 (https://github.com/Azure/azure-sdk-for-go/issues/947). + id, err := guid.NewGUIDs(guid.CreationStrategyVersion1) + if err != nil { + return err + } + + changesetBoundary := fmt.Sprintf("changeset_%s", id.String()) uri := t.Table.tsc.client.getEndpoint(tableServiceName, "$batch", nil) changesetBody, err := t.generateChangesetBody(changesetBoundary) if err != nil { return err } - boundary := fmt.Sprintf("batch_%s", uuid.NewV1()) + id, err = guid.NewGUIDs(guid.CreationStrategyVersion1) + if err != nil { + return err + } + + boundary := fmt.Sprintf("batch_%s", id.String()) body, err := generateBody(changesetBody, changesetBoundary, boundary) if err != nil { return err diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go index 7734b8f886f7..089a74a8cc67 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go @@ -71,6 +71,10 @@ func timeRfc1123Formatted(t time.Time) string { return t.Format(http.TimeFormat) } +func timeRFC3339Formatted(t time.Time) string { + return t.Format("2006-01-02T15:04:05.0000000Z") +} + func mergeParams(v1, v2 url.Values) url.Values { out := url.Values{} for k, v := range v1 { @@ -172,7 +176,7 @@ func addTimeout(params url.Values, timeout uint) url.Values { func addSnapshot(params url.Values, snapshot *time.Time) url.Values { if snapshot != nil { - params.Add("snapshot", snapshot.Format("2006-01-02T15:04:05.0000000Z")) + params.Add("snapshot", timeRFC3339Formatted(*snapshot)) } return params } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/util_1.7.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/util_1.7.go deleted file mode 100644 index 67ff6ca03fe3..000000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/util_1.7.go +++ /dev/null @@ -1,26 +0,0 @@ -// +build !go1.8 - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package storage - -import ( - "io" - "net/http" -) - -func setContentLengthFromLimitedReader(req *http.Request, lr *io.LimitedReader) { - req.ContentLength = lr.N -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/util_1.8.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/util_1.8.go deleted file mode 100644 index eada102c0cff..000000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/util_1.8.go +++ /dev/null @@ -1,32 +0,0 @@ -// +build go1.8 - -// Copyright 2017 Microsoft Corporation -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package storage - -import ( - "io" - "io/ioutil" - "net/http" -) - -func setContentLengthFromLimitedReader(req *http.Request, lr *io.LimitedReader) { - req.ContentLength = lr.N - snapshot := *lr - req.GetBody = func() (io.ReadCloser, error) { - r := snapshot - return ioutil.NopCloser(&r), nil - } -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/version.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/version.go index 1cd3e03d12a8..74eda60808c1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/version.go @@ -15,5 +15,5 @@ package storage // limitations under the License. var ( - sdkVersion = "10.0.2" + sdkVersion = "v12.4.0-beta" ) diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/README.md b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md index a17cf98c6215..08966c9cf892 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/README.md +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md @@ -218,6 +218,40 @@ if (err == nil) { } ``` +#### Username password authenticate + +```Go +spt, err := adal.NewServicePrincipalTokenFromUsernamePassword( + oauthConfig, + applicationID, + username, + password, + resource, + callbacks...) + +if (err == nil) { + token := spt.Token +} +``` + +#### Authorization code authenticate + +``` Go +spt, err := adal.NewServicePrincipalTokenFromAuthorizationCode( + oauthConfig, + applicationID, + clientSecret, + authorizationCode, + redirectURI, + resource, + callbacks...) + +err = spt.Refresh() +if (err == nil) { + token := spt.Token +} +``` + ### Command Line Tool A command line tool is available in `cmd/adal.go` that can acquire a token for a given resource. It supports all flows mentioned above. diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/config.go b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go index 49e9214d598a..f570d540a623 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/config.go +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go @@ -32,8 +32,24 @@ type OAuthConfig struct { DeviceCodeEndpoint url.URL } +// IsZero returns true if the OAuthConfig object is zero-initialized. 
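+// Token constructors rely on this, via validateOAuthConfig, to reject
+// zero-value configurations before any request is made.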
+func (oac OAuthConfig) IsZero() bool { + return oac == OAuthConfig{} +} + +func validateStringParam(param, name string) error { + if len(param) == 0 { + return fmt.Errorf("parameter '" + name + "' cannot be empty") + } + return nil +} + // NewOAuthConfig returns an OAuthConfig with tenant specific urls func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, error) { + if err := validateStringParam(activeDirectoryEndpoint, "activeDirectoryEndpoint"); err != nil { + return nil, err + } + // it's legal for tenantID to be empty so don't validate it const activeDirectoryEndpointTemplate = "%s/oauth2/%s?api-version=%s" u, err := url.Parse(activeDirectoryEndpoint) if err != nil { diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go index 67dd97a18c18..941af281b568 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go @@ -27,6 +27,7 @@ import ( "net/url" "strconv" "strings" + "sync" "time" "github.com/Azure/go-autorest/autorest/date" @@ -42,9 +43,15 @@ const ( // OAuthGrantTypeClientCredentials is the "grant_type" identifier used in credential flows OAuthGrantTypeClientCredentials = "client_credentials" + // OAuthGrantTypeUserPass is the "grant_type" identifier used in username and password auth flows + OAuthGrantTypeUserPass = "password" + // OAuthGrantTypeRefreshToken is the "grant_type" identifier used in refresh token flows OAuthGrantTypeRefreshToken = "refresh_token" + // OAuthGrantTypeAuthorizationCode is the "grant_type" identifier used in authorization code flows + OAuthGrantTypeAuthorizationCode = "authorization_code" + // metadataHeader is the header required by MSI extension metadataHeader = "Metadata" ) @@ -54,6 +61,12 @@ type OAuthTokenProvider interface { OAuthToken() string } +// TokenRefreshError is an interface used by errors returned during token refresh. +type TokenRefreshError interface { + error + Response() *http.Response +} + // Refresher is an interface for token refresh functionality type Refresher interface { Refresh() error @@ -78,6 +91,11 @@ type Token struct { Type string `json:"token_type"` } +// IsZero returns true if the token object is zero-initialized. +func (t Token) IsZero() bool { + return t == Token{} +} + // Expires returns the time.Time when the Token expires. func (t Token) Expires() time.Time { s, err := strconv.Atoi(t.ExpiresOn) @@ -145,6 +163,34 @@ type ServicePrincipalCertificateSecret struct { type ServicePrincipalMSISecret struct { } +// ServicePrincipalUsernamePasswordSecret implements ServicePrincipalSecret for username and password auth. +type ServicePrincipalUsernamePasswordSecret struct { + Username string + Password string +} + +// ServicePrincipalAuthorizationCodeSecret implements ServicePrincipalSecret for authorization code auth. +type ServicePrincipalAuthorizationCodeSecret struct { + ClientSecret string + AuthorizationCode string + RedirectURI string +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +func (secret *ServicePrincipalAuthorizationCodeSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + v.Set("code", secret.AuthorizationCode) + v.Set("client_secret", secret.ClientSecret) + v.Set("redirect_uri", secret.RedirectURI) + return nil +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. 
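+// It copies the configured username and password into the outgoing token
+// request's form values.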
+func (secret *ServicePrincipalUsernamePasswordSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + v.Set("username", secret.Username) + v.Set("password", secret.Password) + return nil +} + // SetAuthenticationValues is a method of the interface ServicePrincipalSecret. func (msiSecret *ServicePrincipalMSISecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { return nil @@ -199,25 +245,46 @@ func (secret *ServicePrincipalCertificateSecret) SetAuthenticationValues(spt *Se type ServicePrincipalToken struct { Token - secret ServicePrincipalSecret - oauthConfig OAuthConfig - clientID string - resource string - autoRefresh bool - refreshWithin time.Duration - sender Sender + secret ServicePrincipalSecret + oauthConfig OAuthConfig + clientID string + resource string + autoRefresh bool + autoRefreshLock *sync.Mutex + refreshWithin time.Duration + sender Sender refreshCallbacks []TokenRefreshCallback } +func validateOAuthConfig(oac OAuthConfig) error { + if oac.IsZero() { + return fmt.Errorf("parameter 'oauthConfig' cannot be zero-initialized") + } + return nil +} + // NewServicePrincipalTokenWithSecret create a ServicePrincipalToken using the supplied ServicePrincipalSecret implementation. func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, resource string, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(id, "id"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if secret == nil { + return nil, fmt.Errorf("parameter 'secret' cannot be nil") + } spt := &ServicePrincipalToken{ oauthConfig: oauthConfig, secret: secret, clientID: id, resource: resource, autoRefresh: true, + autoRefreshLock: &sync.Mutex{}, refreshWithin: defaultRefresh, sender: &http.Client{}, refreshCallbacks: callbacks, @@ -227,6 +294,18 @@ func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, reso // NewServicePrincipalTokenFromManualToken creates a ServicePrincipalToken using the supplied token func NewServicePrincipalTokenFromManualToken(oauthConfig OAuthConfig, clientID string, resource string, token Token, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if token.IsZero() { + return nil, fmt.Errorf("parameter 'token' cannot be zero-initialized") + } spt, err := NewServicePrincipalTokenWithSecret( oauthConfig, clientID, @@ -245,6 +324,18 @@ func NewServicePrincipalTokenFromManualToken(oauthConfig OAuthConfig, clientID s // NewServicePrincipalToken creates a ServicePrincipalToken from the supplied Service Principal // credentials scoped to the named resource. 
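+//
+// A minimal sketch with hypothetical identifiers (assuming the environment
+// constants in the companion azure package):
+//
+//	config, _ := adal.NewOAuthConfig(azure.PublicCloud.ActiveDirectoryEndpoint, "<tenantID>")
+//	spt, err := adal.NewServicePrincipalToken(*config, "<clientID>", "<clientSecret>", "https://management.azure.com/")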
func NewServicePrincipalToken(oauthConfig OAuthConfig, clientID string, secret string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(secret, "secret"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } return NewServicePrincipalTokenWithSecret( oauthConfig, clientID, @@ -256,8 +347,23 @@ func NewServicePrincipalToken(oauthConfig OAuthConfig, clientID string, secret s ) } -// NewServicePrincipalTokenFromCertificate create a ServicePrincipalToken from the supplied pkcs12 bytes. +// NewServicePrincipalTokenFromCertificate creates a ServicePrincipalToken from the supplied pkcs12 bytes. func NewServicePrincipalTokenFromCertificate(oauthConfig OAuthConfig, clientID string, certificate *x509.Certificate, privateKey *rsa.PrivateKey, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if certificate == nil { + return nil, fmt.Errorf("parameter 'certificate' cannot be nil") + } + if privateKey == nil { + return nil, fmt.Errorf("parameter 'privateKey' cannot be nil") + } return NewServicePrincipalTokenWithSecret( oauthConfig, clientID, @@ -270,6 +376,70 @@ func NewServicePrincipalTokenFromCertificate(oauthConfig OAuthConfig, clientID s ) } +// NewServicePrincipalTokenFromUsernamePassword creates a ServicePrincipalToken from the username and password. 
+func NewServicePrincipalTokenFromUsernamePassword(oauthConfig OAuthConfig, clientID string, username string, password string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+	if err := validateOAuthConfig(oauthConfig); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(clientID, "clientID"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(username, "username"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(password, "password"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(resource, "resource"); err != nil {
+		return nil, err
+	}
+	return NewServicePrincipalTokenWithSecret(
+		oauthConfig,
+		clientID,
+		resource,
+		&ServicePrincipalUsernamePasswordSecret{
+			Username: username,
+			Password: password,
+		},
+		callbacks...,
+	)
+}
+
+// NewServicePrincipalTokenFromAuthorizationCode creates a ServicePrincipalToken from the supplied authorization code.
+func NewServicePrincipalTokenFromAuthorizationCode(oauthConfig OAuthConfig, clientID string, clientSecret string, authorizationCode string, redirectURI string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+
+	if err := validateOAuthConfig(oauthConfig); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(clientID, "clientID"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(clientSecret, "clientSecret"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(authorizationCode, "authorizationCode"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(redirectURI, "redirectURI"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(resource, "resource"); err != nil {
+		return nil, err
+	}
+
+	return NewServicePrincipalTokenWithSecret(
+		oauthConfig,
+		clientID,
+		resource,
+		&ServicePrincipalAuthorizationCodeSecret{
+			ClientSecret:      clientSecret,
+			AuthorizationCode: authorizationCode,
+			RedirectURI:       redirectURI,
+		},
+		callbacks...,
+	)
+}
+
 // GetMSIVMEndpoint gets the MSI endpoint on Virtual Machines.
 func GetMSIVMEndpoint() (string, error) {
 	return getMSIVMEndpoint(msiPath)
@@ -293,7 +463,29 @@ func getMSIVMEndpoint(path string) (string, error) {
 }
 
 // NewServicePrincipalTokenFromMSI creates a ServicePrincipalToken via the MSI VM Extension.
+// It will use the system assigned identity when creating the token.
 func NewServicePrincipalTokenFromMSI(msiEndpoint, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+	return newServicePrincipalTokenFromMSI(msiEndpoint, resource, nil, callbacks...)
+}
+
+// NewServicePrincipalTokenFromMSIWithUserAssignedID creates a ServicePrincipalToken via the MSI VM Extension.
+// It will use the specified user assigned identity when creating the token.
+func NewServicePrincipalTokenFromMSIWithUserAssignedID(msiEndpoint, resource string, userAssignedID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+	return newServicePrincipalTokenFromMSI(msiEndpoint, resource, &userAssignedID, callbacks...)
+}
+
+func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedID *string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+	if err := validateStringParam(msiEndpoint, "msiEndpoint"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(resource, "resource"); err != nil {
+		return nil, err
+	}
+	if userAssignedID != nil {
+		if err := validateStringParam(*userAssignedID, "userAssignedID"); err != nil {
+			return nil, err
+		}
+	}
 	// We set the oauth config token endpoint to be MSI's endpoint
 	msiEndpointURL, err := url.Parse(msiEndpoint)
 	if err != nil {
@@ -310,19 +502,49 @@ func NewServicePrincipalTokenFromMSI(msiEndpoint, resource string, callbacks ...
 		secret:           &ServicePrincipalMSISecret{},
 		resource:         resource,
 		autoRefresh:      true,
+		autoRefreshLock:  &sync.Mutex{},
 		refreshWithin:    defaultRefresh,
 		sender:           &http.Client{},
 		refreshCallbacks: callbacks,
 	}
 
+	if userAssignedID != nil {
+		spt.clientID = *userAssignedID
+	}
+
 	return spt, nil
 }
 
+// internal type that implements TokenRefreshError
+type tokenRefreshError struct {
+	message string
+	resp    *http.Response
+}
+
+// Error implements the error interface which is part of the TokenRefreshError interface.
+func (tre tokenRefreshError) Error() string {
+	return tre.message
+}
+
+// Response implements the TokenRefreshError interface; it returns the raw HTTP response from the refresh operation.
+func (tre tokenRefreshError) Response() *http.Response {
+	return tre.resp
+}
+
+func newTokenRefreshError(message string, resp *http.Response) TokenRefreshError {
+	return tokenRefreshError{message: message, resp: resp}
+}
+
 // EnsureFresh will refresh the token if it will expire within the refresh window (as set by
-// RefreshWithin) and autoRefresh flag is on.
+// RefreshWithin) and autoRefresh flag is on. This method is safe for concurrent use.
 func (spt *ServicePrincipalToken) EnsureFresh() error {
 	if spt.autoRefresh && spt.WillExpireIn(spt.refreshWithin) {
-		return spt.Refresh()
+		// take the lock then check to see if the token was already refreshed
+		spt.autoRefreshLock.Lock()
+		defer spt.autoRefreshLock.Unlock()
+		if spt.WillExpireIn(spt.refreshWithin) {
+			return spt.Refresh()
+		}
 	}
 	return nil
 }
@@ -341,15 +563,28 @@ func (spt *ServicePrincipalToken) InvokeRefreshCallbacks(token Token) error {
 }
 
 // Refresh obtains a fresh token for the Service Principal.
+// This method is not safe for concurrent use and should be synchronized.
 func (spt *ServicePrincipalToken) Refresh() error {
 	return spt.refreshInternal(spt.resource)
 }
 
 // RefreshExchange refreshes the token, but for a different resource.
+// This method is not safe for concurrent use and should be synchronized.
func (spt *ServicePrincipalToken) RefreshExchange(resource string) error { return spt.refreshInternal(resource) } +func (spt *ServicePrincipalToken) getGrantType() string { + switch spt.secret.(type) { + case *ServicePrincipalUsernamePasswordSecret: + return OAuthGrantTypeUserPass + case *ServicePrincipalAuthorizationCodeSecret: + return OAuthGrantTypeAuthorizationCode + default: + return OAuthGrantTypeClientCredentials + } +} + func (spt *ServicePrincipalToken) refreshInternal(resource string) error { v := url.Values{} v.Set("client_id", spt.clientID) @@ -359,7 +594,7 @@ func (spt *ServicePrincipalToken) refreshInternal(resource string) error { v.Set("grant_type", OAuthGrantTypeRefreshToken) v.Set("refresh_token", spt.RefreshToken) } else { - v.Set("grant_type", OAuthGrantTypeClientCredentials) + v.Set("grant_type", spt.getGrantType()) err := spt.secret.SetAuthenticationValues(spt, &v) if err != nil { return err @@ -388,9 +623,9 @@ func (spt *ServicePrincipalToken) refreshInternal(resource string) error { if resp.StatusCode != http.StatusOK { if err != nil { - return fmt.Errorf("adal: Refresh request failed. Status Code = '%d'. Failed reading response body", resp.StatusCode) + return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Failed reading response body", resp.StatusCode), resp) } - return fmt.Errorf("adal: Refresh request failed. Status Code = '%d'. Response body: %s", resp.StatusCode, string(rb)) + return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Response body: %s", resp.StatusCode, string(rb)), resp) } if err != nil { diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization.go b/vendor/github.com/Azure/go-autorest/autorest/authorization.go index 71e3ced2d6a6..4a602f6760af 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/authorization.go +++ b/vendor/github.com/Azure/go-autorest/autorest/authorization.go @@ -24,9 +24,12 @@ import ( ) const ( - bearerChallengeHeader = "Www-Authenticate" - bearer = "Bearer" - tenantID = "tenantID" + bearerChallengeHeader = "Www-Authenticate" + bearer = "Bearer" + tenantID = "tenantID" + apiKeyAuthorizerHeader = "Ocp-Apim-Subscription-Key" + bingAPISdkHeader = "X-BingApis-SDK-Client" + golangBingAPISdkHeaderValue = "Go-SDK" ) // Authorizer is the interface that provides a PrepareDecorator used to supply request @@ -44,6 +47,53 @@ func (na NullAuthorizer) WithAuthorization() PrepareDecorator { return WithNothing() } +// APIKeyAuthorizer implements API Key authorization. +type APIKeyAuthorizer struct { + headers map[string]interface{} + queryParameters map[string]interface{} +} + +// NewAPIKeyAuthorizerWithHeaders creates an ApiKeyAuthorizer with headers. +func NewAPIKeyAuthorizerWithHeaders(headers map[string]interface{}) *APIKeyAuthorizer { + return NewAPIKeyAuthorizer(headers, nil) +} + +// NewAPIKeyAuthorizerWithQueryParameters creates an ApiKeyAuthorizer with query parameters. +func NewAPIKeyAuthorizerWithQueryParameters(queryParameters map[string]interface{}) *APIKeyAuthorizer { + return NewAPIKeyAuthorizer(nil, queryParameters) +} + +// NewAPIKeyAuthorizer creates an ApiKeyAuthorizer with headers. 
+func NewAPIKeyAuthorizer(headers map[string]interface{}, queryParameters map[string]interface{}) *APIKeyAuthorizer {
+	return &APIKeyAuthorizer{headers: headers, queryParameters: queryParameters}
+}
+
+// WithAuthorization returns a PrepareDecorator that adds HTTP headers and query parameters.
+func (aka *APIKeyAuthorizer) WithAuthorization() PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return DecoratePreparer(p, WithHeaders(aka.headers), WithQueryParameters(aka.queryParameters))
+	}
+}
+
+// CognitiveServicesAuthorizer implements authorization for Cognitive Services.
+type CognitiveServicesAuthorizer struct {
+	subscriptionKey string
+}
+
+// NewCognitiveServicesAuthorizer creates a CognitiveServicesAuthorizer with the specified subscription key.
+func NewCognitiveServicesAuthorizer(subscriptionKey string) *CognitiveServicesAuthorizer {
+	return &CognitiveServicesAuthorizer{subscriptionKey: subscriptionKey}
+}
+
+// WithAuthorization returns a PrepareDecorator that adds the Cognitive Services subscription-key and Bing SDK headers.
+func (csa *CognitiveServicesAuthorizer) WithAuthorization() PrepareDecorator {
+	headers := make(map[string]interface{})
+	headers[apiKeyAuthorizerHeader] = csa.subscriptionKey
+	headers[bingAPISdkHeader] = golangBingAPISdkHeaderValue
+
+	return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization()
+}
+
 // BearerAuthorizer implements the bearer authorization
 type BearerAuthorizer struct {
 	tokenProvider adal.OAuthTokenProvider
@@ -69,7 +119,11 @@ func (ba *BearerAuthorizer) WithAuthorization() PrepareDecorator {
 			if ok {
 				err := refresher.EnsureFresh()
 				if err != nil {
-					return r, NewErrorWithError(err, "azure.BearerAuthorizer", "WithAuthorization", nil,
+					var resp *http.Response
+					if tokError, ok := err.(adal.TokenRefreshError); ok {
+						resp = tokError.Response()
+					}
+					return r, NewErrorWithError(err, "azure.BearerAuthorizer", "WithAuthorization", resp,
 						"Failed to refresh the Token for request to %s", r.URL)
 				}
 			}
@@ -179,3 +233,22 @@ func newBearerChallenge(resp *http.Response) (bc bearerChallenge, err error) {
 
 	return bc, err
 }
+
+// EventGridKeyAuthorizer implements authorization for event grid using key authentication.
+type EventGridKeyAuthorizer struct {
+	topicKey string
+}
+
+// NewEventGridKeyAuthorizer creates a new EventGridKeyAuthorizer
+// with the specified topic key.
+func NewEventGridKeyAuthorizer(topicKey string) EventGridKeyAuthorizer {
+	return EventGridKeyAuthorizer{topicKey: topicKey}
+}
+
+// WithAuthorization returns a PrepareDecorator that adds the aeg-sas-key authentication header.
+func (egta EventGridKeyAuthorizer) WithAuthorization() PrepareDecorator {
+	headers := map[string]interface{}{
+		"aeg-sas-key": egta.topicKey,
+	}
+	return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization()
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/autorest.go b/vendor/github.com/Azure/go-autorest/autorest/autorest.go
index 37b907c77f51..f86b66a410b1 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/autorest.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/autorest.go
@@ -87,6 +87,9 @@ const (
 
 // ResponseHasStatusCode returns true if the status code in the HTTP Response is in the passed set
 // and false otherwise.
func ResponseHasStatusCode(resp *http.Response, codes ...int) bool { + if resp == nil { + return false + } return containsInt(codes, resp.StatusCode) } diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go index ffbc8da28e52..366fc5379391 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go @@ -16,6 +16,8 @@ package azure import ( "bytes" + "context" + "encoding/json" "fmt" "io/ioutil" "net/http" @@ -37,6 +39,152 @@ const ( operationSucceeded string = "Succeeded" ) +var pollingCodes = [...]int{http.StatusNoContent, http.StatusAccepted, http.StatusCreated, http.StatusOK} + +// Future provides a mechanism to access the status and results of an asynchronous request. +// Since futures are stateful they should be passed by value to avoid race conditions. +type Future struct { + req *http.Request + resp *http.Response + ps pollingState +} + +// NewFuture returns a new Future object initialized with the specified request. +func NewFuture(req *http.Request) Future { + return Future{req: req} +} + +// Response returns the last HTTP response or nil if there isn't one. +func (f Future) Response() *http.Response { + return f.resp +} + +// Status returns the last status message of the operation. +func (f Future) Status() string { + if f.ps.State == "" { + return "Unknown" + } + return f.ps.State +} + +// PollingMethod returns the method used to monitor the status of the asynchronous operation. +func (f Future) PollingMethod() PollingMethodType { + return f.ps.PollingMethod +} + +// Done queries the service to see if the operation has completed. +func (f *Future) Done(sender autorest.Sender) (bool, error) { + // exit early if this future has terminated + if f.ps.hasTerminated() { + return true, f.errorInfo() + } + + resp, err := sender.Do(f.req) + f.resp = resp + if err != nil || !autorest.ResponseHasStatusCode(resp, pollingCodes[:]...) { + return false, err + } + + err = updatePollingState(resp, &f.ps) + if err != nil { + return false, err + } + + if f.ps.hasTerminated() { + return true, f.errorInfo() + } + + f.req, err = newPollingRequest(f.ps) + return false, err +} + +// GetPollingDelay returns a duration the application should wait before checking +// the status of the asynchronous request and true; this value is returned from +// the service via the Retry-After response header. If the header wasn't returned +// then the function returns the zero-value time.Duration and false. +func (f Future) GetPollingDelay() (time.Duration, bool) { + if f.resp == nil { + return 0, false + } + + retry := f.resp.Header.Get(autorest.HeaderRetryAfter) + if retry == "" { + return 0, false + } + + d, err := time.ParseDuration(retry + "s") + if err != nil { + panic(err) + } + + return d, true +} + +// WaitForCompletion will return when one of the following conditions is met: the long +// running operation has completed, the provided context is cancelled, or the client's +// polling duration has been exceeded. It will retry failed polling attempts based on +// the retry value defined in the client up to the maximum retry attempts. 
+func (f Future) WaitForCompletion(ctx context.Context, client autorest.Client) error { + ctx, cancel := context.WithTimeout(ctx, client.PollingDuration) + defer cancel() + + done, err := f.Done(client) + for attempts := 0; !done; done, err = f.Done(client) { + if attempts >= client.RetryAttempts { + return autorest.NewErrorWithError(err, "azure", "WaitForCompletion", f.resp, "the number of retries has been exceeded") + } + // we want delayAttempt to be zero in the non-error case so + // that DelayForBackoff doesn't perform exponential back-off + var delayAttempt int + var delay time.Duration + if err == nil { + // check for Retry-After delay, if not present use the client's polling delay + var ok bool + delay, ok = f.GetPollingDelay() + if !ok { + delay = client.PollingDelay + } + } else { + // there was an error polling for status so perform exponential + // back-off based on the number of attempts using the client's retry + // duration. update attempts after delayAttempt to avoid off-by-one. + delayAttempt = attempts + delay = client.RetryDuration + attempts++ + } + // wait until the delay elapses or the context is cancelled + delayElapsed := autorest.DelayForBackoff(delay, delayAttempt, ctx.Done()) + if !delayElapsed { + return autorest.NewErrorWithError(ctx.Err(), "azure", "WaitForCompletion", f.resp, "context has been cancelled") + } + } + return err +} + +// if the operation failed the polling state will contain +// error information and implements the error interface +func (f *Future) errorInfo() error { + if !f.ps.hasSucceeded() { + return f.ps + } + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (f Future) MarshalJSON() ([]byte, error) { + return json.Marshal(&f.ps) +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (f *Future) UnmarshalJSON(data []byte) error { + err := json.Unmarshal(data, &f.ps) + if err != nil { + return err + } + f.req, err = newPollingRequest(f.ps) + return err +} + // DoPollForAsynchronous returns a SendDecorator that polls if the http.Response is for an Azure // long-running operation. It will delay between requests for the duration specified in the // RetryAfter header or, if the header is absent, the passed delay. Polling may be canceled by @@ -48,8 +196,7 @@ func DoPollForAsynchronous(delay time.Duration) autorest.SendDecorator { if err != nil { return resp, err } - pollingCodes := []int{http.StatusAccepted, http.StatusCreated, http.StatusOK} - if !autorest.ResponseHasStatusCode(resp, pollingCodes...) { + if !autorest.ResponseHasStatusCode(resp, pollingCodes[:]...) 
{ return resp, nil } @@ -66,10 +213,11 @@ func DoPollForAsynchronous(delay time.Duration) autorest.SendDecorator { break } - r, err = newPollingRequest(resp, ps) + r, err = newPollingRequest(ps) if err != nil { return resp, err } + r.Cancel = resp.Request.Cancel delay = autorest.GetRetryAfter(resp, delay) resp, err = autorest.SendWithSender(s, r, @@ -86,20 +234,15 @@ func getAsyncOperation(resp *http.Response) string { } func hasSucceeded(state string) bool { - return state == operationSucceeded + return strings.EqualFold(state, operationSucceeded) } func hasTerminated(state string) bool { - switch state { - case operationCanceled, operationFailed, operationSucceeded: - return true - default: - return false - } + return strings.EqualFold(state, operationCanceled) || strings.EqualFold(state, operationFailed) || strings.EqualFold(state, operationSucceeded) } func hasFailed(state string) bool { - return state == operationFailed + return strings.EqualFold(state, operationFailed) } type provisioningTracker interface { @@ -160,36 +303,42 @@ func (ps provisioningStatus) hasProvisioningError() bool { return ps.ProvisioningError != ServiceError{} } -type pollingResponseFormat string +// PollingMethodType defines a type used for enumerating polling mechanisms. +type PollingMethodType string const ( - usesOperationResponse pollingResponseFormat = "OperationResponse" - usesProvisioningStatus pollingResponseFormat = "ProvisioningStatus" - formatIsUnknown pollingResponseFormat = "" + // PollingAsyncOperation indicates the polling method uses the Azure-AsyncOperation header. + PollingAsyncOperation PollingMethodType = "AsyncOperation" + + // PollingLocation indicates the polling method uses the Location header. + PollingLocation PollingMethodType = "Location" + + // PollingUnknown indicates an unknown polling method and is the default value. + PollingUnknown PollingMethodType = "" ) type pollingState struct { - responseFormat pollingResponseFormat - uri string - state string - code string - message string + PollingMethod PollingMethodType `json:"pollingMethod"` + URI string `json:"uri"` + State string `json:"state"` + Code string `json:"code"` + Message string `json:"message"` } func (ps pollingState) hasSucceeded() bool { - return hasSucceeded(ps.state) + return hasSucceeded(ps.State) } func (ps pollingState) hasTerminated() bool { - return hasTerminated(ps.state) + return hasTerminated(ps.State) } func (ps pollingState) hasFailed() bool { - return hasFailed(ps.state) + return hasFailed(ps.State) } func (ps pollingState) Error() string { - return fmt.Sprintf("Long running operation terminated with status '%s': Code=%q Message=%q", ps.state, ps.code, ps.message) + return fmt.Sprintf("Long running operation terminated with status '%s': Code=%q Message=%q", ps.State, ps.Code, ps.Message) } // updatePollingState maps the operation status -- retrieved from either a provisioningState @@ -204,7 +353,7 @@ func updatePollingState(resp *http.Response, ps *pollingState) error { // -- The first response will always be a provisioningStatus response; only the polling requests, // depending on the header returned, may be something otherwise. 
var pt provisioningTracker - if ps.responseFormat == usesOperationResponse { + if ps.PollingMethod == PollingAsyncOperation { pt = &operationResource{} } else { pt = &provisioningStatus{} @@ -212,30 +361,30 @@ func updatePollingState(resp *http.Response, ps *pollingState) error { // If this is the first request (that is, the polling response shape is unknown), determine how // to poll and what to expect - if ps.responseFormat == formatIsUnknown { + if ps.PollingMethod == PollingUnknown { req := resp.Request if req == nil { return autorest.NewError("azure", "updatePollingState", "Azure Polling Error - Original HTTP request is missing") } // Prefer the Azure-AsyncOperation header - ps.uri = getAsyncOperation(resp) - if ps.uri != "" { - ps.responseFormat = usesOperationResponse + ps.URI = getAsyncOperation(resp) + if ps.URI != "" { + ps.PollingMethod = PollingAsyncOperation } else { - ps.responseFormat = usesProvisioningStatus + ps.PollingMethod = PollingLocation } // Else, use the Location header - if ps.uri == "" { - ps.uri = autorest.GetLocation(resp) + if ps.URI == "" { + ps.URI = autorest.GetLocation(resp) } // Lastly, requests against an existing resource, use the last request URI - if ps.uri == "" { + if ps.URI == "" { m := strings.ToUpper(req.Method) if m == http.MethodPatch || m == http.MethodPut || m == http.MethodGet { - ps.uri = req.URL.String() + ps.URI = req.URL.String() } } } @@ -256,23 +405,23 @@ func updatePollingState(resp *http.Response, ps *pollingState) error { // -- Unknown states are per-service inprogress states // -- Otherwise, infer state from HTTP status code if pt.hasTerminated() { - ps.state = pt.state() + ps.State = pt.state() } else if pt.state() != "" { - ps.state = operationInProgress + ps.State = operationInProgress } else { switch resp.StatusCode { case http.StatusAccepted: - ps.state = operationInProgress + ps.State = operationInProgress case http.StatusNoContent, http.StatusCreated, http.StatusOK: - ps.state = operationSucceeded + ps.State = operationSucceeded default: - ps.state = operationFailed + ps.State = operationFailed } } - if ps.state == operationInProgress && ps.uri == "" { + if strings.EqualFold(ps.State, operationInProgress) && ps.URI == "" { return autorest.NewError("azure", "updatePollingState", "Azure Polling Error - Unable to obtain polling URI for %s %s", resp.Request.Method, resp.Request.URL) } @@ -281,36 +430,49 @@ func updatePollingState(resp *http.Response, ps *pollingState) error { // -- Response // -- Otherwise, Unknown if ps.hasFailed() { - if ps.responseFormat == usesOperationResponse { + if ps.PollingMethod == PollingAsyncOperation { or := pt.(*operationResource) - ps.code = or.OperationError.Code - ps.message = or.OperationError.Message + ps.Code = or.OperationError.Code + ps.Message = or.OperationError.Message } else { p := pt.(*provisioningStatus) if p.hasProvisioningError() { - ps.code = p.ProvisioningError.Code - ps.message = p.ProvisioningError.Message + ps.Code = p.ProvisioningError.Code + ps.Message = p.ProvisioningError.Message } else { - ps.code = "Unknown" - ps.message = "None" + ps.Code = "Unknown" + ps.Message = "None" } } } return nil } -func newPollingRequest(resp *http.Response, ps pollingState) (*http.Request, error) { - req := resp.Request - if req == nil { - return nil, autorest.NewError("azure", "newPollingRequest", "Azure Polling Error - Original HTTP request is missing") - } - - reqPoll, err := autorest.Prepare(&http.Request{Cancel: req.Cancel}, +func newPollingRequest(ps pollingState) (*http.Request, error) 
{ + reqPoll, err := autorest.Prepare(&http.Request{}, autorest.AsGet(), - autorest.WithBaseURL(ps.uri)) + autorest.WithBaseURL(ps.URI)) if err != nil { - return nil, autorest.NewErrorWithError(err, "azure", "newPollingRequest", nil, "Failure creating poll request to %s", ps.uri) + return nil, autorest.NewErrorWithError(err, "azure", "newPollingRequest", nil, "Failure creating poll request to %s", ps.URI) } return reqPoll, nil } + +// AsyncOpIncompleteError is the type that's returned from a future that has not completed. +type AsyncOpIncompleteError struct { + // FutureType is the name of the type composed of a azure.Future. + FutureType string +} + +// Error returns an error message including the originating type name of the error. +func (e AsyncOpIncompleteError) Error() string { + return fmt.Sprintf("%s: asynchronous operation has not completed", e.FutureType) +} + +// NewAsyncOpIncompleteError creates a new AsyncOpIncompleteError with the specified parameters. +func NewAsyncOpIncompleteError(futureType string) AsyncOpIncompleteError { + return AsyncOpIncompleteError{ + FutureType: futureType, + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go index 30c4351a576a..936836493ba1 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go @@ -15,10 +15,17 @@ package azure // limitations under the License. import ( + "encoding/json" "fmt" + "io/ioutil" + "os" "strings" ) +// EnvironmentFilepathName captures the name of the environment variable containing the path to the file +// to be used while populating the Azure Environment. +const EnvironmentFilepathName = "AZURE_ENVIRONMENT_FILEPATH" + var environments = map[string]Environment{ "AZURECHINACLOUD": ChinaCloud, "AZUREGERMANCLOUD": GermanCloud, @@ -76,10 +83,10 @@ var ( PublishSettingsURL: "https://manage.windowsazure.us/publishsettings/index", ServiceManagementEndpoint: "https://management.core.usgovcloudapi.net/", ResourceManagerEndpoint: "https://management.usgovcloudapi.net/", - ActiveDirectoryEndpoint: "https://login.microsoftonline.com/", + ActiveDirectoryEndpoint: "https://login.microsoftonline.us/", GalleryEndpoint: "https://gallery.usgovcloudapi.net/", KeyVaultEndpoint: "https://vault.usgovcloudapi.net/", - GraphEndpoint: "https://graph.usgovcloudapi.net/", + GraphEndpoint: "https://graph.windows.net/", StorageEndpointSuffix: "core.usgovcloudapi.net", SQLDatabaseDNSSuffix: "database.usgovcloudapi.net", TrafficManagerDNSSuffix: "usgovtrafficmanager.net", @@ -133,12 +140,37 @@ var ( } ) -// EnvironmentFromName returns an Environment based on the common name specified +// EnvironmentFromName returns an Environment based on the common name specified. func EnvironmentFromName(name string) (Environment, error) { + // IMPORTANT + // As per @radhikagupta5: + // This is technical debt, fundamentally here because Kubernetes is not currently accepting + // contributions to the providers. Once that is an option, the provider should be updated to + // directly call `EnvironmentFromFile`. Until then, we rely on dispatching Azure Stack environment creation + // from this method based on the name that is provided to us. 
+ if strings.EqualFold(name, "AZURESTACKCLOUD") { + return EnvironmentFromFile(os.Getenv(EnvironmentFilepathName)) + } + name = strings.ToUpper(name) env, ok := environments[name] if !ok { return env, fmt.Errorf("autorest/azure: There is no cloud environment matching the name %q", name) } + return env, nil } + +// EnvironmentFromFile loads an Environment from a configuration file available on disk. +// This function is particularly useful in the Hybrid Cloud model, where one must define their own +// endpoints. +func EnvironmentFromFile(location string) (unmarshaled Environment, err error) { + fileContents, err := ioutil.ReadFile(location) + if err != nil { + return + } + + err = json.Unmarshal(fileContents, &unmarshaled) + + return +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go b/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go index 40d5f5ba002f..b6b95d6fdbcb 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go @@ -1,3 +1,17 @@ +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package azure import ( @@ -30,7 +44,7 @@ func DoRetryWithRegistration(client autorest.Client) autorest.SendDecorator { return resp, err } - if resp.StatusCode != http.StatusConflict { + if resp.StatusCode != http.StatusConflict || client.SkipResourceProviderRegistration { return resp, err } var re RequestError @@ -41,15 +55,16 @@ func DoRetryWithRegistration(client autorest.Client) autorest.SendDecorator { if err != nil { return resp, err } + err = re if re.ServiceError != nil && re.ServiceError.Code == "MissingSubscriptionRegistration" { - err = register(client, r, re) - if err != nil { - return resp, fmt.Errorf("failed auto registering Resource Provider: %s", err) + regErr := register(client, r, re) + if regErr != nil { + return resp, fmt.Errorf("failed auto registering Resource Provider: %s. Original error: %s", regErr, err) } } } - return resp, errors.New("failed request and resource provider registration") + return resp, fmt.Errorf("failed request: %s", err) }) } } @@ -144,7 +159,7 @@ func register(client autorest.Client, originalReq *http.Request, re RequestError } req.Cancel = originalReq.Cancel - resp, err := autorest.SendWithSender(client.Sender, req, + resp, err := autorest.SendWithSender(client, req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...), ) if err != nil { diff --git a/vendor/github.com/Azure/go-autorest/autorest/client.go b/vendor/github.com/Azure/go-autorest/autorest/client.go index c857e761168b..d329cb737799 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/client.go +++ b/vendor/github.com/Azure/go-autorest/autorest/client.go @@ -35,6 +35,9 @@ const ( // DefaultRetryAttempts is number of attempts for retry status codes (5xx). DefaultRetryAttempts = 3 + + // DefaultRetryDuration is the duration to wait between retries. 
+ DefaultRetryDuration = 30 * time.Second ) var ( @@ -163,6 +166,9 @@ type Client struct { UserAgent string Jar http.CookieJar + + // Set to true to skip attempted registration of resource providers (false by default). + SkipResourceProviderRegistration bool } // NewClientWithUserAgent returns an instance of a Client with the UserAgent set to the passed @@ -172,9 +178,10 @@ func NewClientWithUserAgent(ua string) Client { PollingDelay: DefaultPollingDelay, PollingDuration: DefaultPollingDuration, RetryAttempts: DefaultRetryAttempts, - RetryDuration: 30 * time.Second, + RetryDuration: DefaultRetryDuration, UserAgent: defaultUserAgent, } + c.Sender = c.sender() c.AddToUserAgent(ua) return c } @@ -200,11 +207,17 @@ func (c Client) Do(r *http.Request) (*http.Response, error) { c.WithInspection(), c.WithAuthorization()) if err != nil { - return nil, NewErrorWithError(err, "autorest/Client", "Do", nil, "Preparing request failed") + var resp *http.Response + if detErr, ok := err.(DetailedError); ok { + // if the authorization failed (e.g. invalid credentials) there will + // be a response associated with the error, be sure to return it. + resp = detErr.Response + } + return resp, NewErrorWithError(err, "autorest/Client", "Do", nil, "Preparing request failed") } + resp, err := SendWithSender(c.sender(), r) - Respond(resp, - c.ByInspecting()) + Respond(resp, c.ByInspecting()) return resp, err } diff --git a/vendor/github.com/Azure/go-autorest/autorest/preparer.go b/vendor/github.com/Azure/go-autorest/autorest/preparer.go index 2290c4010032..6d67bd7337b7 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/preparer.go +++ b/vendor/github.com/Azure/go-autorest/autorest/preparer.go @@ -27,8 +27,9 @@ import ( ) const ( - mimeTypeJSON = "application/json" - mimeTypeFormPost = "application/x-www-form-urlencoded" + mimeTypeJSON = "application/json" + mimeTypeOctetStream = "application/octet-stream" + mimeTypeFormPost = "application/x-www-form-urlencoded" headerAuthorization = "Authorization" headerContentType = "Content-Type" @@ -112,6 +113,28 @@ func WithHeader(header string, value string) PrepareDecorator { } } +// WithHeaders returns a PrepareDecorator that sets the specified HTTP headers of the http.Request to +// the passed value. It canonicalizes the passed headers name (via http.CanonicalHeaderKey) before +// adding them. +func WithHeaders(headers map[string]interface{}) PrepareDecorator { + h := ensureValueStrings(headers) + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.Header == nil { + r.Header = make(http.Header) + } + + for name, value := range h { + r.Header.Set(http.CanonicalHeaderKey(name), value) + } + } + return r, err + }) + } +} + // WithBearerAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose // value is "Bearer " followed by the supplied token. func WithBearerAuthorization(token string) PrepareDecorator { @@ -142,6 +165,11 @@ func AsJSON() PrepareDecorator { return AsContentType(mimeTypeJSON) } +// AsOctetStream returns a PrepareDecorator that adds the "application/octet-stream" Content-Type header. +func AsOctetStream() PrepareDecorator { + return AsContentType(mimeTypeOctetStream) +} + // WithMethod returns a PrepareDecorator that sets the HTTP method of the passed request. The // decorator does not validate that the passed method string is a known HTTP method. 
func WithMethod(method string) PrepareDecorator {
@@ -215,6 +243,11 @@ func WithFormData(v url.Values) PrepareDecorator {
 			r, err := p.Prepare(r)
 			if err == nil {
 				s := v.Encode()
+
+				if r.Header == nil {
+					r.Header = make(http.Header)
+				}
+
 				r.Header.Set(http.CanonicalHeaderKey(headerContentType), mimeTypeFormPost)
 				r.ContentLength = int64(len(s))
 				r.Body = ioutil.NopCloser(strings.NewReader(s))
 			}
@@ -430,11 +463,16 @@ func WithQueryParameters(queryParameters map[string]interface{}) PrepareDecorato
 				if r.URL == nil {
 					return r, NewError("autorest", "WithQueryParameters", "Invoked with a nil URL")
 				}
+
 				v := r.URL.Query()
 				for key, value := range parameters {
-					v.Add(key, value)
+					d, err := url.QueryUnescape(value)
+					if err != nil {
+						return r, err
+					}
+					v.Add(key, d)
 				}
-				r.URL.RawQuery = createQuery(v)
+				r.URL.RawQuery = v.Encode()
 			}
 			return r, err
 		})
diff --git a/vendor/github.com/Azure/go-autorest/autorest/sender.go b/vendor/github.com/Azure/go-autorest/autorest/sender.go
index 7264c32f27db..c5efd59a219f 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/sender.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/sender.go
@@ -215,19 +215,26 @@ func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) Se
 		rr := NewRetriableRequest(r)
 		// Increment to add the first call (attempts denotes number of retries)
 		attempts++
-		for attempt := 0; attempt < attempts; attempt++ {
+		for attempt := 0; attempt < attempts; {
 			err = rr.Prepare()
 			if err != nil {
 				return resp, err
 			}
 			resp, err = s.Do(rr.Request())
-			if err != nil || !ResponseHasStatusCode(resp, codes...) {
+			// we want to retry if err is not nil (e.g. transient network failure). note that for failed authentication
+			// resp and err will both have a value, so in this case we don't want to retry as it will never succeed.
+			if err == nil && !ResponseHasStatusCode(resp, codes...) || IsTokenRefreshError(err) {
 				return resp, err
 			}
 			delayed := DelayWithRetryAfter(resp, r.Cancel)
 			if !delayed {
 				DelayForBackoff(backoff, attempt, r.Cancel)
 			}
+			// don't count a 429 against the number of attempts
+			// so that we continue to retry until it succeeds
+			if resp == nil || resp.StatusCode != http.StatusTooManyRequests {
+				attempt++
+			}
 		}
 		return resp, err
 	})
@@ -237,6 +244,9 @@ func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) Se
 // DelayWithRetryAfter invokes time.After for the duration specified in the "Retry-After" header in
 // responses with status code 429
 func DelayWithRetryAfter(resp *http.Response, cancel <-chan struct{}) bool {
+	if resp == nil {
+		return false
+	}
 	retryAfter, _ := strconv.Atoi(resp.Header.Get("Retry-After"))
 	if resp.StatusCode == http.StatusTooManyRequests && retryAfter > 0 {
 		select {
diff --git a/vendor/github.com/Azure/go-autorest/autorest/utility.go b/vendor/github.com/Azure/go-autorest/autorest/utility.go
index dfdc6efdff06..afb3e4e161ba 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/utility.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/utility.go
@@ -20,10 +20,12 @@ import (
 	"encoding/xml"
 	"fmt"
 	"io"
+	"net/http"
 	"net/url"
 	"reflect"
-	"sort"
 	"strings"
+
+	"github.com/Azure/go-autorest/autorest/adal"
 )
 
 // EncodedAs is a series of constants specifying various data encodings
@@ -137,13 +139,38 @@ func MapToValues(m map[string]interface{}) url.Values {
 	return v
 }
 
+// AsStringSlice converts interface{} to []string. It expects the parameter
+// passed to be a slice or an array of a type whose underlying type is string.
+func AsStringSlice(s interface{}) ([]string, error) {
+	v := reflect.ValueOf(s)
+	if v.Kind() != reflect.Slice && v.Kind() != reflect.Array {
+		return nil, NewError("autorest", "AsStringSlice", "the value's type is not an array.")
+	}
+	stringSlice := make([]string, 0, v.Len())
+
+	for i := 0; i < v.Len(); i++ {
+		stringSlice = append(stringSlice, v.Index(i).String())
+	}
+	return stringSlice, nil
+}
+
 // String method converts interface v to string. If interface is a list, it
-// joins list elements using separator.
+// joins list elements using the separator. Note that only sep[0] will be used for
+// joining if any separator is specified.
 func String(v interface{}, sep ...string) string {
-	if len(sep) > 0 {
-		return ensureValueString(strings.Join(v.([]string), sep[0]))
+	if len(sep) == 0 {
+		return ensureValueString(v)
 	}
-	return ensureValueString(v)
+	stringSlice, ok := v.([]string)
+	if ok == false {
+		var err error
+		stringSlice, err = AsStringSlice(v)
+		if err != nil {
+			panic(fmt.Sprintf("autorest: Couldn't convert value to a string %s.", err))
+		}
+	}
+	return ensureValueString(strings.Join(stringSlice, sep[0]))
 }
 
 // Encode method encodes url path and query parameters.
@@ -167,26 +194,25 @@ func queryEscape(s string) string {
 	return url.QueryEscape(s)
 }
 
-// This method is same as Encode() method of "net/url" go package,
-// except it does not encode the query parameters because they
-// already come encoded. It formats values map in query format (bar=foo&a=b).
-func createQuery(v url.Values) string {
-	var buf bytes.Buffer
-	keys := make([]string, 0, len(v))
-	for k := range v {
-		keys = append(keys, k)
+// ChangeToGet turns the specified http.Request into a GET (it assumes it wasn't).
+// This is mainly useful for long-running operations that use the Azure-AsyncOperation
+// header, so we change the initial PUT into a GET to retrieve the final result.
+func ChangeToGet(req *http.Request) *http.Request {
+	req.Method = "GET"
+	req.Body = nil
+	req.ContentLength = 0
+	req.Header.Del("Content-Length")
+	return req
+}
+
+// IsTokenRefreshError returns true if the specified error implements the TokenRefreshError
+// interface. If err is a DetailedError it will walk the chain of Original errors.
+func IsTokenRefreshError(err error) bool { + if _, ok := err.(adal.TokenRefreshError); ok { + return true } - sort.Strings(keys) - for _, k := range keys { - vs := v[k] - prefix := url.QueryEscape(k) + "=" - for _, v := range vs { - if buf.Len() > 0 { - buf.WriteByte('&') - } - buf.WriteString(prefix) - buf.WriteString(v) - } + if de, ok := err.(DetailedError); ok { + return IsTokenRefreshError(de.Original) } - return buf.String() + return false } diff --git a/vendor/github.com/Azure/go-autorest/autorest/version.go b/vendor/github.com/Azure/go-autorest/autorest/version.go index f588807dbb9c..a19c0d35a2d4 100644 --- a/vendor/github.com/Azure/go-autorest/autorest/version.go +++ b/vendor/github.com/Azure/go-autorest/autorest/version.go @@ -22,9 +22,9 @@ import ( ) const ( - major = 8 - minor = 0 - patch = 0 + major = 9 + minor = 8 + patch = 1 tag = "" ) diff --git a/vendor/github.com/boltdb/bolt/.gitignore b/vendor/github.com/boltdb/bolt/.gitignore deleted file mode 100644 index c7bd2b7a5b84..000000000000 --- a/vendor/github.com/boltdb/bolt/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -*.prof -*.test -*.swp -/bin/ diff --git a/vendor/github.com/boltdb/bolt/Makefile b/vendor/github.com/boltdb/bolt/Makefile deleted file mode 100644 index e035e63adcd7..000000000000 --- a/vendor/github.com/boltdb/bolt/Makefile +++ /dev/null @@ -1,18 +0,0 @@ -BRANCH=`git rev-parse --abbrev-ref HEAD` -COMMIT=`git rev-parse --short HEAD` -GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)" - -default: build - -race: - @go test -v -race -test.run="TestSimulate_(100op|1000op)" - -# go get github.com/kisielk/errcheck -errcheck: - @errcheck -ignorepkg=bytes -ignore=os:Remove github.com/boltdb/bolt - -test: - @go test -v -cover . - @go test -v ./cmd/bolt - -.PHONY: fmt test diff --git a/vendor/github.com/boltdb/bolt/README.md b/vendor/github.com/boltdb/bolt/README.md deleted file mode 100644 index 8523e3377344..000000000000 --- a/vendor/github.com/boltdb/bolt/README.md +++ /dev/null @@ -1,852 +0,0 @@ -Bolt [![Coverage Status](https://coveralls.io/repos/boltdb/bolt/badge.svg?branch=master)](https://coveralls.io/r/boltdb/bolt?branch=master) [![GoDoc](https://godoc.org/github.com/boltdb/bolt?status.svg)](https://godoc.org/github.com/boltdb/bolt) ![Version](https://img.shields.io/badge/version-1.2.1-green.svg) -==== - -Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas] -[LMDB project][lmdb]. The goal of the project is to provide a simple, -fast, and reliable database for projects that don't require a full database -server such as Postgres or MySQL. - -Since Bolt is meant to be used as such a low-level piece of functionality, -simplicity is key. The API will be small and only focus on getting values -and setting values. That's it. - -[hyc_symas]: https://twitter.com/hyc_symas -[lmdb]: http://symas.com/mdb/ - -## Project Status - -Bolt is stable and the API is fixed. Full unit test coverage and randomized -black box testing are used to ensure database consistency and thread safety. -Bolt is currently in high-load production environments serving databases as -large as 1TB. Many companies such as Shopify and Heroku use Bolt-backed -services every day. 
- -## Table of Contents - -- [Getting Started](#getting-started) - - [Installing](#installing) - - [Opening a database](#opening-a-database) - - [Transactions](#transactions) - - [Read-write transactions](#read-write-transactions) - - [Read-only transactions](#read-only-transactions) - - [Batch read-write transactions](#batch-read-write-transactions) - - [Managing transactions manually](#managing-transactions-manually) - - [Using buckets](#using-buckets) - - [Using key/value pairs](#using-keyvalue-pairs) - - [Autoincrementing integer for the bucket](#autoincrementing-integer-for-the-bucket) - - [Iterating over keys](#iterating-over-keys) - - [Prefix scans](#prefix-scans) - - [Range scans](#range-scans) - - [ForEach()](#foreach) - - [Nested buckets](#nested-buckets) - - [Database backups](#database-backups) - - [Statistics](#statistics) - - [Read-Only Mode](#read-only-mode) - - [Mobile Use (iOS/Android)](#mobile-use-iosandroid) -- [Resources](#resources) -- [Comparison with other databases](#comparison-with-other-databases) - - [Postgres, MySQL, & other relational databases](#postgres-mysql--other-relational-databases) - - [LevelDB, RocksDB](#leveldb-rocksdb) - - [LMDB](#lmdb) -- [Caveats & Limitations](#caveats--limitations) -- [Reading the Source](#reading-the-source) -- [Other Projects Using Bolt](#other-projects-using-bolt) - -## Getting Started - -### Installing - -To start using Bolt, install Go and run `go get`: - -```sh -$ go get github.com/boltdb/bolt/... -``` - -This will retrieve the library and install the `bolt` command line utility into -your `$GOBIN` path. - - -### Opening a database - -The top-level object in Bolt is a `DB`. It is represented as a single file on -your disk and represents a consistent snapshot of your data. - -To open your database, simply use the `bolt.Open()` function: - -```go -package main - -import ( - "log" - - "github.com/boltdb/bolt" -) - -func main() { - // Open the my.db data file in your current directory. - // It will be created if it doesn't exist. - db, err := bolt.Open("my.db", 0600, nil) - if err != nil { - log.Fatal(err) - } - defer db.Close() - - ... -} -``` - -Please note that Bolt obtains a file lock on the data file so multiple processes -cannot open the same database at the same time. Opening an already open Bolt -database will cause it to hang until the other process closes it. To prevent -an indefinite wait you can pass a timeout option to the `Open()` function: - -```go -db, err := bolt.Open("my.db", 0600, &bolt.Options{Timeout: 1 * time.Second}) -``` - - -### Transactions - -Bolt allows only one read-write transaction at a time but allows as many -read-only transactions as you want at a time. Each transaction has a consistent -view of the data as it existed when the transaction started. - -Individual transactions and all objects created from them (e.g. buckets, keys) -are not thread safe. To work with data in multiple goroutines you must start -a transaction for each one or use locking to ensure only one goroutine accesses -a transaction at a time. Creating transaction from the `DB` is thread safe. - -Read-only transactions and read-write transactions should not depend on one -another and generally shouldn't be opened simultaneously in the same goroutine. -This can cause a deadlock as the read-write transaction needs to periodically -re-map the data file but it cannot do so while a read-only transaction is open. 
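To make that warning concrete, here is a minimal sketch of the nesting to avoid. It assumes `db` is an open `*bolt.DB` and a bucket named `MyBucket` exists, as in the snippets below; both are illustrative assumptions:

```go
// DON'T: opening a read-write transaction inside a read-only one in the
// same goroutine can deadlock. Update() may need to re-map the data file,
// and it cannot do so while this View() transaction is still open.
err := db.View(func(tx *bolt.Tx) error {
	return db.Update(func(wtx *bolt.Tx) error {
		// never reached if Update() blocks waiting for the View() to close
		return wtx.Bucket([]byte("MyBucket")).Put([]byte("k"), []byte("v"))
	})
})
```

Run the two transactions sequentially, or from separate goroutines, instead.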
- - -#### Read-write transactions - -To start a read-write transaction, you can use the `DB.Update()` function: - -```go -err := db.Update(func(tx *bolt.Tx) error { - ... - return nil -}) -``` - -Inside the closure, you have a consistent view of the database. You commit the -transaction by returning `nil` at the end. You can also rollback the transaction -at any point by returning an error. All database operations are allowed inside -a read-write transaction. - -Always check the return error as it will report any disk failures that can cause -your transaction to not complete. If you return an error within your closure -it will be passed through. - - -#### Read-only transactions - -To start a read-only transaction, you can use the `DB.View()` function: - -```go -err := db.View(func(tx *bolt.Tx) error { - ... - return nil -}) -``` - -You also get a consistent view of the database within this closure, however, -no mutating operations are allowed within a read-only transaction. You can only -retrieve buckets, retrieve values, and copy the database within a read-only -transaction. - - -#### Batch read-write transactions - -Each `DB.Update()` waits for disk to commit the writes. This overhead -can be minimized by combining multiple updates with the `DB.Batch()` -function: - -```go -err := db.Batch(func(tx *bolt.Tx) error { - ... - return nil -}) -``` - -Concurrent Batch calls are opportunistically combined into larger -transactions. Batch is only useful when there are multiple goroutines -calling it. - -The trade-off is that `Batch` can call the given -function multiple times, if parts of the transaction fail. The -function must be idempotent and side effects must take effect only -after a successful return from `DB.Batch()`. - -For example: don't display messages from inside the function, instead -set variables in the enclosing scope: - -```go -var id uint64 -err := db.Batch(func(tx *bolt.Tx) error { - // Find last key in bucket, decode as bigendian uint64, increment - // by one, encode back to []byte, and add new key. - ... - id = newValue - return nil -}) -if err != nil { - return ... -} -fmt.Println("Allocated ID %d", id) -``` - - -#### Managing transactions manually - -The `DB.View()` and `DB.Update()` functions are wrappers around the `DB.Begin()` -function. These helper functions will start the transaction, execute a function, -and then safely close your transaction if an error is returned. This is the -recommended way to use Bolt transactions. - -However, sometimes you may want to manually start and end your transactions. -You can use the `Tx.Begin()` function directly but **please** be sure to close -the transaction. - -```go -// Start a writable transaction. -tx, err := db.Begin(true) -if err != nil { - return err -} -defer tx.Rollback() - -// Use the transaction... -_, err := tx.CreateBucket([]byte("MyBucket")) -if err != nil { - return err -} - -// Commit the transaction and check for error. -if err := tx.Commit(); err != nil { - return err -} -``` - -The first argument to `DB.Begin()` is a boolean stating if the transaction -should be writable. - - -### Using buckets - -Buckets are collections of key/value pairs within the database. All keys in a -bucket must be unique. 
You can create a bucket using the `DB.CreateBucket()` -function: - -```go -db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("MyBucket")) - if err != nil { - return fmt.Errorf("create bucket: %s", err) - } - return nil -}) -``` - -You can also create a bucket only if it doesn't exist by using the -`Tx.CreateBucketIfNotExists()` function. It's a common pattern to call this -function for all your top-level buckets after you open your database so you can -guarantee that they exist for future transactions. - -To delete a bucket, simply call the `Tx.DeleteBucket()` function. - - -### Using key/value pairs - -To save a key/value pair to a bucket, use the `Bucket.Put()` function: - -```go -db.Update(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("MyBucket")) - err := b.Put([]byte("answer"), []byte("42")) - return err -}) -``` - -This will set the value of the `"answer"` key to `"42"` in the `MyBucket` -bucket. To retrieve this value, we can use the `Bucket.Get()` function: - -```go -db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("MyBucket")) - v := b.Get([]byte("answer")) - fmt.Printf("The answer is: %s\n", v) - return nil -}) -``` - -The `Get()` function does not return an error because its operation is -guaranteed to work (unless there is some kind of system failure). If the key -exists then it will return its byte slice value. If it doesn't exist then it -will return `nil`. It's important to note that you can have a zero-length value -set to a key which is different than the key not existing. - -Use the `Bucket.Delete()` function to delete a key from the bucket. - -Please note that values returned from `Get()` are only valid while the -transaction is open. If you need to use a value outside of the transaction -then you must use `copy()` to copy it to another byte slice. - - -### Autoincrementing integer for the bucket -By using the `NextSequence()` function, you can let Bolt determine a sequence -which can be used as the unique identifier for your key/value pairs. See the -example below. - -```go -// CreateUser saves u to the store. The new user ID is set on u once the data is persisted. -func (s *Store) CreateUser(u *User) error { - return s.db.Update(func(tx *bolt.Tx) error { - // Retrieve the users bucket. - // This should be created when the DB is first opened. - b := tx.Bucket([]byte("users")) - - // Generate ID for the user. - // This returns an error only if the Tx is closed or not writeable. - // That can't happen in an Update() call so I ignore the error check. - id, _ := b.NextSequence() - u.ID = int(id) - - // Marshal user data into bytes. - buf, err := json.Marshal(u) - if err != nil { - return err - } - - // Persist bytes to users bucket. - return b.Put(itob(u.ID), buf) - }) -} - -// itob returns an 8-byte big endian representation of v. -func itob(v int) []byte { - b := make([]byte, 8) - binary.BigEndian.PutUint64(b, uint64(v)) - return b -} - -type User struct { - ID int - ... -} -``` - -### Iterating over keys - -Bolt stores its keys in byte-sorted order within a bucket. This makes sequential -iteration over these keys extremely fast. 
To iterate over keys we'll use a -`Cursor`: - -```go -db.View(func(tx *bolt.Tx) error { - // Assume bucket exists and has keys - b := tx.Bucket([]byte("MyBucket")) - - c := b.Cursor() - - for k, v := c.First(); k != nil; k, v = c.Next() { - fmt.Printf("key=%s, value=%s\n", k, v) - } - - return nil -}) -``` - -The cursor allows you to move to a specific point in the list of keys and move -forward or backward through the keys one at a time. - -The following functions are available on the cursor: - -``` -First() Move to the first key. -Last() Move to the last key. -Seek() Move to a specific key. -Next() Move to the next key. -Prev() Move to the previous key. -``` - -Each of those functions has a return signature of `(key []byte, value []byte)`. -When you have iterated to the end of the cursor then `Next()` will return a -`nil` key. You must seek to a position using `First()`, `Last()`, or `Seek()` -before calling `Next()` or `Prev()`. If you do not seek to a position then -these functions will return a `nil` key. - -During iteration, if the key is non-`nil` but the value is `nil`, that means -the key refers to a bucket rather than a value. Use `Bucket.Bucket()` to -access the sub-bucket. - - -#### Prefix scans - -To iterate over a key prefix, you can combine `Seek()` and `bytes.HasPrefix()`: - -```go -db.View(func(tx *bolt.Tx) error { - // Assume bucket exists and has keys - c := tx.Bucket([]byte("MyBucket")).Cursor() - - prefix := []byte("1234") - for k, v := c.Seek(prefix); bytes.HasPrefix(k, prefix); k, v = c.Next() { - fmt.Printf("key=%s, value=%s\n", k, v) - } - - return nil -}) -``` - -#### Range scans - -Another common use case is scanning over a range such as a time range. If you -use a sortable time encoding such as RFC3339 then you can query a specific -date range like this: - -```go -db.View(func(tx *bolt.Tx) error { - // Assume our events bucket exists and has RFC3339 encoded time keys. - c := tx.Bucket([]byte("Events")).Cursor() - - // Our time range spans the 90's decade. - min := []byte("1990-01-01T00:00:00Z") - max := []byte("2000-01-01T00:00:00Z") - - // Iterate over the 90's. - for k, v := c.Seek(min); k != nil && bytes.Compare(k, max) <= 0; k, v = c.Next() { - fmt.Printf("%s: %s\n", k, v) - } - - return nil -}) -``` - -Note that, while RFC3339 is sortable, the Golang implementation of RFC3339Nano does not use a fixed number of digits after the decimal point and is therefore not sortable. - - -#### ForEach() - -You can also use the function `ForEach()` if you know you'll be iterating over -all the keys in a bucket: - -```go -db.View(func(tx *bolt.Tx) error { - // Assume bucket exists and has keys - b := tx.Bucket([]byte("MyBucket")) - - b.ForEach(func(k, v []byte) error { - fmt.Printf("key=%s, value=%s\n", k, v) - return nil - }) - return nil -}) -``` - - -### Nested buckets - -You can also store a bucket in a key to create nested buckets. The API is the -same as the bucket management API on the `DB` object: - -```go -func (*Bucket) CreateBucket(key []byte) (*Bucket, error) -func (*Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) -func (*Bucket) DeleteBucket(key []byte) error -``` - - -### Database backups - -Bolt is a single file so it's easy to backup. You can use the `Tx.WriteTo()` -function to write a consistent view of the database to a writer. If you call -this from a read-only transaction, it will perform a hot backup and not block -your other database reads and writes. 
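For instance, a minimal sketch of a snapshot to a local file, assuming `db` is an open `*bolt.DB`, the `os` package is imported, and the `backup.db` path is illustrative:

```go
// Write a consistent snapshot to backup.db. Because this runs inside a
// read-only transaction, other readers and writers are not blocked.
err := db.View(func(tx *bolt.Tx) error {
	f, err := os.Create("backup.db")
	if err != nil {
		return err
	}
	defer f.Close()
	_, err = tx.WriteTo(f)
	return err
})
```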
-
-By default, `Tx.WriteTo()` will use a regular file handle which will utilize the
-operating system's page cache. See the [`Tx`](https://godoc.org/github.com/boltdb/bolt#Tx)
-documentation for information about optimizing for larger-than-RAM datasets.
-
-One common use case is to back up over HTTP so you can use tools like `cURL` to
-do database backups:
-
-```go
-func BackupHandleFunc(w http.ResponseWriter, req *http.Request) {
-	err := db.View(func(tx *bolt.Tx) error {
-		w.Header().Set("Content-Type", "application/octet-stream")
-		w.Header().Set("Content-Disposition", `attachment; filename="my.db"`)
-		w.Header().Set("Content-Length", strconv.Itoa(int(tx.Size())))
-		_, err := tx.WriteTo(w)
-		return err
-	})
-	if err != nil {
-		http.Error(w, err.Error(), http.StatusInternalServerError)
-	}
-}
-```
-
-Then you can back up using this command:
-
-```sh
-$ curl http://localhost/backup > my.db
-```
-
-Or you can open your browser to `http://localhost/backup` and it will download
-automatically.
-
-If you want to back up to another file you can use the `Tx.CopyFile()` helper
-function.
-
-
-### Statistics
-
-The database keeps a running count of many of the internal operations it
-performs so you can better understand what's going on. By grabbing a snapshot
-of these stats at two points in time we can see what operations were performed
-in that time range.
-
-For example, we could start a goroutine to log stats every 10 seconds:
-
-```go
-go func() {
-	// Grab the initial stats.
-	prev := db.Stats()
-
-	for {
-		// Wait for 10s.
-		time.Sleep(10 * time.Second)
-
-		// Grab the current stats and diff them.
-		stats := db.Stats()
-		diff := stats.Sub(&prev)
-
-		// Encode stats to JSON and print to STDERR.
-		json.NewEncoder(os.Stderr).Encode(diff)
-
-		// Save stats for the next loop.
-		prev = stats
-	}
-}()
-```
-
-It's also useful to pipe these stats to a service such as statsd for monitoring
-or to provide an HTTP endpoint that will perform a fixed-length sample.
-
-
-### Read-Only Mode
-
-Sometimes it is useful to create a shared, read-only Bolt database. To do this,
-set the `Options.ReadOnly` flag when opening your database. Read-only mode
-uses a shared lock to allow multiple processes to read from the database but
-it will block any processes from opening the database in read-write mode.
-
-```go
-db, err := bolt.Open("my.db", 0666, &bolt.Options{ReadOnly: true})
-if err != nil {
-	log.Fatal(err)
-}
-```
-
-### Mobile Use (iOS/Android)
-
-Bolt is able to run on mobile devices by leveraging the binding feature of the
-[gomobile](https://github.com/golang/mobile) tool. Create a struct that will
-contain your database logic and a reference to a `*bolt.DB` with an initializing
-constructor that takes in a filepath where the database file will be stored.
-Neither Android nor iOS requires extra permissions or cleanup from using this
-method.
-
-```go
-func NewBoltDB(filepath string) *BoltDB {
-	db, err := bolt.Open(filepath+"/demo.db", 0600, nil)
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	return &BoltDB{db}
-}
-
-type BoltDB struct {
-	db *bolt.DB
-	...
-}
-
-func (b *BoltDB) Path() string {
-	return b.db.Path()
-}
-
-func (b *BoltDB) Close() {
-	b.db.Close()
-}
-```
-
-Database logic should be defined as methods on this wrapper struct.
-
-To initialize this struct from the native language, use snippets like the ones
-below. Both platforms now sync their local storage to the cloud;
these snippets disable that
-functionality for the database file:
-
-#### Android
-
-```java
-String path;
-if (android.os.Build.VERSION.SDK_INT >= android.os.Build.VERSION_CODES.LOLLIPOP) {
-    path = getNoBackupFilesDir().getAbsolutePath();
-} else {
-    path = getFilesDir().getAbsolutePath();
-}
-Boltmobiledemo.BoltDB boltDB = Boltmobiledemo.NewBoltDB(path);
-```
-
-#### iOS
-
-```objc
-- (void)demo {
-    NSString* path = [NSSearchPathForDirectoriesInDomains(NSLibraryDirectory,
-                                                          NSUserDomainMask,
-                                                          YES) objectAtIndex:0];
-    GoBoltmobiledemoBoltDB * demo = GoBoltmobiledemoNewBoltDB(path);
-    [self addSkipBackupAttributeToItemAtPath:demo.path];
-    // Some DB logic would go here
-    [demo close];
-}
-
-- (BOOL)addSkipBackupAttributeToItemAtPath:(NSString *)filePathString
-{
-    NSURL* URL = [NSURL fileURLWithPath:filePathString];
-    assert([[NSFileManager defaultManager] fileExistsAtPath:[URL path]]);
-
-    NSError *error = nil;
-    BOOL success = [URL setResourceValue:[NSNumber numberWithBool:YES]
-                                  forKey:NSURLIsExcludedFromBackupKey error:&error];
-    if (!success) {
-        NSLog(@"Error excluding %@ from backup %@", [URL lastPathComponent], error);
-    }
-    return success;
-}
-```
-
-## Resources
-
-For more information on getting started with Bolt, check out the following articles:
-
-* [Intro to BoltDB: Painless Performant Persistence](http://npf.io/2014/07/intro-to-boltdb-painless-performant-persistence/) by [Nate Finch](https://github.com/natefinch).
-* [Bolt -- an embedded key/value database for Go](https://www.progville.com/go/bolt-embedded-db-golang/) by Progville
-
-
-## Comparison with other databases
-
-### Postgres, MySQL, & other relational databases
-
-Relational databases structure data into rows and are only accessible through
-the use of SQL. This approach provides flexibility in how you store and query
-your data but also incurs overhead in parsing and planning SQL statements. Bolt
-accesses all data by a byte slice key. This makes Bolt fast to read and write
-data by key but provides no built-in support for joining values together.
-
-Most relational databases (with the exception of SQLite) are standalone servers
-that run separately from your application. This gives your systems
-flexibility to connect multiple application servers to a single database
-server but also adds overhead in serializing and transporting data over the
-network. Bolt runs as a library included in your application so all data access
-has to go through your application's process. This brings data closer to your
-application but limits multi-process access to the data.
-
-
-### LevelDB, RocksDB
-
-LevelDB and its derivatives (RocksDB, HyperLevelDB) are similar to Bolt in that
-they are libraries bundled into the application; however, their underlying
-structure is a log-structured merge-tree (LSM tree). An LSM tree optimizes
-random writes by using a write-ahead log and multi-tiered, sorted files called
-SSTables. Bolt uses a B+tree internally and only a single file. Both approaches
-have trade-offs.
-
-If you require a high random write throughput (>10,000 w/sec) or you need to use
-spinning disks then LevelDB could be a good choice. If your application is
-read-heavy or does a lot of range scans then Bolt could be a good choice.
-
-One other important consideration is that LevelDB does not have transactions.
-It supports batch writing of key/value pairs and it supports read snapshots
-but it will not give you the ability to do a compare-and-swap operation safely.
-Bolt supports fully serializable ACID transactions.
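-
-For example, a compare-and-swap is straightforward inside a single `Update()`
-transaction. A minimal sketch (the bucket name and `CompareAndSwap` helper are
-illustrative, and the bucket is assumed to exist):
-
-```go
-// CompareAndSwap sets key to newVal only if it currently equals oldVal.
-// Update() runs the closure in one serializable write transaction, so no
-// other writer can modify the key between the Get and the Put.
-func CompareAndSwap(db *bolt.DB, key, oldVal, newVal []byte) error {
-	return db.Update(func(tx *bolt.Tx) error {
-		b := tx.Bucket([]byte("MyBucket"))
-		// Note: Get returns nil if the key does not exist.
-		if !bytes.Equal(b.Get(key), oldVal) {
-			return fmt.Errorf("value changed concurrently")
-		}
-		return b.Put(key, newVal)
-	})
-}
-```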
-
-
-### LMDB
-
-Bolt was originally a port of LMDB so it is architecturally similar. Both use
-a B+tree, have ACID semantics with fully serializable transactions, and support
-lock-free MVCC using a single writer and multiple readers.
-
-The two projects have somewhat diverged. LMDB heavily focuses on raw performance
-while Bolt has focused on simplicity and ease of use. For example, LMDB allows
-several unsafe actions such as direct writes for the sake of performance. Bolt
-opts to disallow actions which can leave the database in a corrupted state. The
-only exception to this in Bolt is `DB.NoSync`.
-
-There are also a few differences in API. LMDB requires a maximum mmap size when
-opening an `mdb_env` whereas Bolt will handle incremental mmap resizing
-automatically. LMDB overloads the getter and setter functions with multiple
-flags whereas Bolt splits these specialized cases into their own functions.
-
-
-## Caveats & Limitations
-
-It's important to pick the right tool for the job and Bolt is no exception.
-Here are a few things to note when evaluating and using Bolt:
-
-* Bolt is good for read-intensive workloads. Sequential write performance is
-  also fast but random writes can be slow. You can use `DB.Batch()` or add a
-  write-ahead log to help mitigate this issue.
-
-* Bolt uses a B+tree internally so there can be a lot of random page access.
-  SSDs provide a significant performance boost over spinning disks.
-
-* Try to avoid long-running read transactions. Bolt uses copy-on-write so
-  old pages cannot be reclaimed while an old transaction is using them.
-
-* Byte slices returned from Bolt are only valid during a transaction. Once the
-  transaction has been committed or rolled back then the memory they point to
-  can be reused by a new page or can be unmapped from virtual memory and you'll
-  see an `unexpected fault address` panic when accessing it.
-
-* Be careful when using `Bucket.FillPercent`. Setting a high fill percent for
-  buckets that have random inserts will cause your database to have very poor
-  page utilization.
-
-* Use larger buckets in general. Smaller buckets cause poor page utilization
-  once they become larger than the page size (typically 4KB).
-
-* Bulk loading a lot of random writes into a new bucket can be slow as the
-  page will not split until the transaction is committed. Randomly inserting
-  more than 100,000 key/value pairs into a single new bucket in a single
-  transaction is not advised.
-
-* Bolt uses a memory-mapped file so the underlying operating system handles the
-  caching of the data. Typically, the OS will cache as much of the file as it
-  can in memory and will release memory as needed to other processes. This means
-  that Bolt can show very high memory usage when working with large databases.
-  However, this is expected and the OS will release memory as needed. Bolt can
-  handle databases much larger than the available physical RAM, provided its
-  memory-map fits in the process virtual address space. It may be problematic
-  on 32-bit systems.
-
-* The data structures in the Bolt database are memory mapped so the data file
-  will be endian specific. This means that you cannot copy a Bolt file from a
-  little endian machine to a big endian machine and have it work. For most
-  users this is not a concern since most modern CPUs are little endian.
-
-* Because of the way pages are laid out on disk, Bolt cannot truncate data files
-  and return free pages back to the disk.
Instead, Bolt maintains a free list
-  of unused pages within its data file. These free pages can be reused by later
-  transactions. This works well for many use cases as databases generally tend
-  to grow. However, it's important to note that deleting large chunks of data
-  will not allow you to reclaim that space on disk.
-
-  For more information on page allocation, [see this comment][page-allocation].
-
-[page-allocation]: https://github.com/boltdb/bolt/issues/308#issuecomment-74811638
-
-
-## Reading the Source
-
-Bolt is a relatively small code base (<3KLOC) for an embedded, serializable,
-transactional key/value database so it can be a good starting point for people
-interested in how databases work.
-
-The best places to start are the main entry points into Bolt:
-
-- `Open()` - Initializes the reference to the database. It's responsible for
-  creating the database if it doesn't exist, obtaining an exclusive lock on the
-  file, reading the meta pages, & memory-mapping the file.
-
-- `DB.Begin()` - Starts a read-only or read-write transaction depending on the
-  value of the `writable` argument. This requires briefly obtaining the "meta"
-  lock to keep track of open transactions. Only one read-write transaction can
-  exist at a time so the "rwlock" is acquired during the life of a read-write
-  transaction.
-
-- `Bucket.Put()` - Writes a key/value pair into a bucket. After validating the
-  arguments, a cursor is used to traverse the B+tree to the page and position
-  where the key & value will be written. Once the position is found, the bucket
-  materializes the underlying page and the page's parent pages into memory as
-  "nodes". These nodes are where mutations occur during read-write transactions.
-  These changes get flushed to disk during commit.
-
-- `Bucket.Get()` - Retrieves a key/value pair from a bucket. This uses a cursor
-  to move to the page & position of a key/value pair. During a read-only
-  transaction, the key and value data is returned as a direct reference to the
-  underlying mmap file so there's no allocation overhead. For read-write
-  transactions, this data may reference the mmap file or one of the in-memory
-  node values.
-
-- `Cursor` - This object is simply for traversing the B+tree of on-disk pages
-  or in-memory nodes. It can seek to a specific key, move to the first or last
-  value, or it can move forward or backward. The cursor handles the movement up
-  and down the B+tree transparently to the end user.
-
-- `Tx.Commit()` - Converts the in-memory dirty nodes and the list of free pages
-  into pages to be written to disk. Writing to disk then occurs in two phases.
-  First, the dirty pages are written to disk and an `fsync()` occurs. Second, a
-  new meta page with an incremented transaction ID is written and another
-  `fsync()` occurs. This two-phase write ensures that partially written data
-  pages are ignored in the event of a crash since the meta page pointing to them
-  is never written. Partially written meta pages are invalidated because they
-  are written with a checksum.
-
-If you have additional notes that could be helpful for others, please submit
-them via pull request.
-
-
-## Other Projects Using Bolt
-
-Below is a list of public, open source projects that use Bolt:
-
-* [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web based GUI for BoltDB files.
-* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard.
-* [Bazil](https://bazil.org/) - A file system that lets your data reside where it is most convenient for it to reside.
-* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb.
-* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics.
-* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects.
-* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday.
-* [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations.
-* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite.
-* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin".
-* [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka.
-* [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed.
-* [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt.
-* [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site.
-* [LedisDB](https://github.com/siddontang/ledisdb) - A high-performance NoSQL database, using Bolt as optional storage.
-* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters.
-* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend.
-* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend.
-* [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server.
-* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read.
-* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics.
-* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data.
-* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system.
-* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware.
-* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, exposes a JSON-over-HTTP API, supports ISO 8601 duration notation, and handles dependent jobs.
-* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems.
-* [stow](https://github.com/djherbis/stow) - a persistence manager for objects
-  backed by boltdb.
-* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining
-  simple tx and key scans.
-* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets.
-* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to [RequestBin](http://requestb.in/) service
-* [Go Report Card](https://goreportcard.com/) - Go code quality report cards as a (free and open source) service.
-* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners.
-* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores.
-* [Storm](https://github.com/asdine/storm) - Simple and powerful ORM for BoltDB.
-* [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web application in Go using BoltDB.
-* [SimpleBolt](https://github.com/xyproto/simplebolt) - A simple way to use BoltDB. Deals mainly with strings.
-* [Algernon](https://github.com/xyproto/algernon) - An HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend.
-* [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files.
-* [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Go, using BoltDB for persistent key/value storage and the high-performance HTTPRouter for routing.
-
-If you are using Bolt in a project please send a pull request to add it to the list.
diff --git a/vendor/github.com/boltdb/bolt/bolt_386.go b/vendor/github.com/boltdb/bolt/bolt_386.go
deleted file mode 100644
index e659bfb91f33..000000000000
--- a/vendor/github.com/boltdb/bolt/bolt_386.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package bolt
-
-// maxMapSize represents the largest mmap size supported by Bolt.
-const maxMapSize = 0x7FFFFFFF // 2GB
-
-// maxAllocSize is the size used when creating array pointers.
-const maxAllocSize = 0xFFFFFFF
diff --git a/vendor/github.com/boltdb/bolt/bolt_amd64.go b/vendor/github.com/boltdb/bolt/bolt_amd64.go
deleted file mode 100644
index cca6b7eb7070..000000000000
--- a/vendor/github.com/boltdb/bolt/bolt_amd64.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package bolt
-
-// maxMapSize represents the largest mmap size supported by Bolt.
-const maxMapSize = 0xFFFFFFFFFFFF // 256TB
-
-// maxAllocSize is the size used when creating array pointers.
-const maxAllocSize = 0x7FFFFFFF
diff --git a/vendor/github.com/boltdb/bolt/bolt_arm.go b/vendor/github.com/boltdb/bolt/bolt_arm.go
deleted file mode 100644
index e659bfb91f33..000000000000
--- a/vendor/github.com/boltdb/bolt/bolt_arm.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package bolt
-
-// maxMapSize represents the largest mmap size supported by Bolt.
-const maxMapSize = 0x7FFFFFFF // 2GB
-
-// maxAllocSize is the size used when creating array pointers.
-const maxAllocSize = 0xFFFFFFF
diff --git a/vendor/github.com/boltdb/bolt/bolt_arm64.go b/vendor/github.com/boltdb/bolt/bolt_arm64.go
deleted file mode 100644
index 6d2309352e06..000000000000
--- a/vendor/github.com/boltdb/bolt/bolt_arm64.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build arm64
-
-package bolt
-
-// maxMapSize represents the largest mmap size supported by Bolt.
-const maxMapSize = 0xFFFFFFFFFFFF // 256TB
-
-// maxAllocSize is the size used when creating array pointers.
-const maxAllocSize = 0x7FFFFFFF diff --git a/vendor/github.com/boltdb/bolt/bolt_ppc64.go b/vendor/github.com/boltdb/bolt/bolt_ppc64.go deleted file mode 100644 index 2dc6be02e3e3..000000000000 --- a/vendor/github.com/boltdb/bolt/bolt_ppc64.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build ppc64 - -package bolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF diff --git a/vendor/github.com/boltdb/bolt/bolt_s390x.go b/vendor/github.com/boltdb/bolt/bolt_s390x.go deleted file mode 100644 index f4dd26bbba7c..000000000000 --- a/vendor/github.com/boltdb/bolt/bolt_s390x.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build s390x - -package bolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF diff --git a/vendor/github.com/boltdb/bolt/freelist.go b/vendor/github.com/boltdb/bolt/freelist.go deleted file mode 100644 index 1b7ba91b2a51..000000000000 --- a/vendor/github.com/boltdb/bolt/freelist.go +++ /dev/null @@ -1,248 +0,0 @@ -package bolt - -import ( - "fmt" - "sort" - "unsafe" -) - -// freelist represents a list of all pages that are available for allocation. -// It also tracks pages that have been freed but are still in use by open transactions. -type freelist struct { - ids []pgid // all free and available free page ids. - pending map[txid][]pgid // mapping of soon-to-be free page ids by tx. - cache map[pgid]bool // fast lookup of all free and pending page ids. -} - -// newFreelist returns an empty, initialized freelist. -func newFreelist() *freelist { - return &freelist{ - pending: make(map[txid][]pgid), - cache: make(map[pgid]bool), - } -} - -// size returns the size of the page after serialization. -func (f *freelist) size() int { - return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * f.count()) -} - -// count returns count of pages on the freelist -func (f *freelist) count() int { - return f.free_count() + f.pending_count() -} - -// free_count returns count of free pages -func (f *freelist) free_count() int { - return len(f.ids) -} - -// pending_count returns count of pending pages -func (f *freelist) pending_count() int { - var count int - for _, list := range f.pending { - count += len(list) - } - return count -} - -// all returns a list of all free ids and all pending ids in one sorted list. -func (f *freelist) all() []pgid { - m := make(pgids, 0) - - for _, list := range f.pending { - m = append(m, list...) - } - - sort.Sort(m) - return pgids(f.ids).merge(m) -} - -// allocate returns the starting page id of a contiguous list of pages of a given size. -// If a contiguous block cannot be found then 0 is returned. -func (f *freelist) allocate(n int) pgid { - if len(f.ids) == 0 { - return 0 - } - - var initial, previd pgid - for i, id := range f.ids { - if id <= 1 { - panic(fmt.Sprintf("invalid page allocation: %d", id)) - } - - // Reset initial page if this is not contiguous. - if previd == 0 || id-previd != 1 { - initial = id - } - - // If we found a contiguous block then remove it and return it. - if (id-initial)+1 == pgid(n) { - // If we're allocating off the beginning then take the fast path - // and just adjust the existing slice. This will use extra memory - // temporarily but the append() in free() will realloc the slice - // as is necessary. 
- if (i + 1) == n { - f.ids = f.ids[i+1:] - } else { - copy(f.ids[i-n+1:], f.ids[i+1:]) - f.ids = f.ids[:len(f.ids)-n] - } - - // Remove from the free cache. - for i := pgid(0); i < pgid(n); i++ { - delete(f.cache, initial+i) - } - - return initial - } - - previd = id - } - return 0 -} - -// free releases a page and its overflow for a given transaction id. -// If the page is already free then a panic will occur. -func (f *freelist) free(txid txid, p *page) { - if p.id <= 1 { - panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id)) - } - - // Free page and all its overflow pages. - var ids = f.pending[txid] - for id := p.id; id <= p.id+pgid(p.overflow); id++ { - // Verify that page is not already free. - if f.cache[id] { - panic(fmt.Sprintf("page %d already freed", id)) - } - - // Add to the freelist and cache. - ids = append(ids, id) - f.cache[id] = true - } - f.pending[txid] = ids -} - -// release moves all page ids for a transaction id (or older) to the freelist. -func (f *freelist) release(txid txid) { - m := make(pgids, 0) - for tid, ids := range f.pending { - if tid <= txid { - // Move transaction's pending pages to the available freelist. - // Don't remove from the cache since the page is still free. - m = append(m, ids...) - delete(f.pending, tid) - } - } - sort.Sort(m) - f.ids = pgids(f.ids).merge(m) -} - -// rollback removes the pages from a given pending tx. -func (f *freelist) rollback(txid txid) { - // Remove page ids from cache. - for _, id := range f.pending[txid] { - delete(f.cache, id) - } - - // Remove pages from pending list. - delete(f.pending, txid) -} - -// freed returns whether a given page is in the free list. -func (f *freelist) freed(pgid pgid) bool { - return f.cache[pgid] -} - -// read initializes the freelist from a freelist page. -func (f *freelist) read(p *page) { - // If the page.count is at the max uint16 value (64k) then it's considered - // an overflow and the size of the freelist is stored as the first element. - idx, count := 0, int(p.count) - if count == 0xFFFF { - idx = 1 - count = int(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0]) - } - - // Copy the list of page ids from the freelist. - if count == 0 { - f.ids = nil - } else { - ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx:count] - f.ids = make([]pgid, len(ids)) - copy(f.ids, ids) - - // Make sure they're sorted. - sort.Sort(pgids(f.ids)) - } - - // Rebuild the page cache. - f.reindex() -} - -// write writes the page ids onto a freelist page. All free and pending ids are -// saved to disk since in the event of a program crash, all pending ids will -// become free. -func (f *freelist) write(p *page) error { - // Combine the old free pgids and pgids waiting on an open transaction. - ids := f.all() - - // Update the header flag. - p.flags |= freelistPageFlag - - // The page.count can only hold up to 64k elements so if we overflow that - // number then we handle it by putting the size in the first element. - if len(ids) == 0 { - p.count = uint16(len(ids)) - } else if len(ids) < 0xFFFF { - p.count = uint16(len(ids)) - copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:], ids) - } else { - p.count = 0xFFFF - ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(len(ids)) - copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:], ids) - } - - return nil -} - -// reload reads the freelist from a page and filters out pending items. -func (f *freelist) reload(p *page) { - f.read(p) - - // Build a cache of only pending pages. 
- pcache := make(map[pgid]bool) - for _, pendingIDs := range f.pending { - for _, pendingID := range pendingIDs { - pcache[pendingID] = true - } - } - - // Check each page in the freelist and build a new available freelist - // with any pages not in the pending lists. - var a []pgid - for _, id := range f.ids { - if !pcache[id] { - a = append(a, id) - } - } - f.ids = a - - // Once the available list is rebuilt then rebuild the free cache so that - // it includes the available and pending free pages. - f.reindex() -} - -// reindex rebuilds the free cache based on available and pending free lists. -func (f *freelist) reindex() { - f.cache = make(map[pgid]bool) - for _, id := range f.ids { - f.cache[id] = true - } - for _, pendingIDs := range f.pending { - for _, pendingID := range pendingIDs { - f.cache[pendingID] = true - } - } -} diff --git a/vendor/github.com/boltdb/bolt/node.go b/vendor/github.com/boltdb/bolt/node.go deleted file mode 100644 index 159318b229cb..000000000000 --- a/vendor/github.com/boltdb/bolt/node.go +++ /dev/null @@ -1,604 +0,0 @@ -package bolt - -import ( - "bytes" - "fmt" - "sort" - "unsafe" -) - -// node represents an in-memory, deserialized page. -type node struct { - bucket *Bucket - isLeaf bool - unbalanced bool - spilled bool - key []byte - pgid pgid - parent *node - children nodes - inodes inodes -} - -// root returns the top-level node this node is attached to. -func (n *node) root() *node { - if n.parent == nil { - return n - } - return n.parent.root() -} - -// minKeys returns the minimum number of inodes this node should have. -func (n *node) minKeys() int { - if n.isLeaf { - return 1 - } - return 2 -} - -// size returns the size of the node after serialization. -func (n *node) size() int { - sz, elsz := pageHeaderSize, n.pageElementSize() - for i := 0; i < len(n.inodes); i++ { - item := &n.inodes[i] - sz += elsz + len(item.key) + len(item.value) - } - return sz -} - -// sizeLessThan returns true if the node is less than a given size. -// This is an optimization to avoid calculating a large node when we only need -// to know if it fits inside a certain page size. -func (n *node) sizeLessThan(v int) bool { - sz, elsz := pageHeaderSize, n.pageElementSize() - for i := 0; i < len(n.inodes); i++ { - item := &n.inodes[i] - sz += elsz + len(item.key) + len(item.value) - if sz >= v { - return false - } - } - return true -} - -// pageElementSize returns the size of each page element based on the type of node. -func (n *node) pageElementSize() int { - if n.isLeaf { - return leafPageElementSize - } - return branchPageElementSize -} - -// childAt returns the child node at a given index. -func (n *node) childAt(index int) *node { - if n.isLeaf { - panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index)) - } - return n.bucket.node(n.inodes[index].pgid, n) -} - -// childIndex returns the index of a given child node. -func (n *node) childIndex(child *node) int { - index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, child.key) != -1 }) - return index -} - -// numChildren returns the number of children. -func (n *node) numChildren() int { - return len(n.inodes) -} - -// nextSibling returns the next node with the same parent. -func (n *node) nextSibling() *node { - if n.parent == nil { - return nil - } - index := n.parent.childIndex(n) - if index >= n.parent.numChildren()-1 { - return nil - } - return n.parent.childAt(index + 1) -} - -// prevSibling returns the previous node with the same parent. 
-func (n *node) prevSibling() *node { - if n.parent == nil { - return nil - } - index := n.parent.childIndex(n) - if index == 0 { - return nil - } - return n.parent.childAt(index - 1) -} - -// put inserts a key/value. -func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) { - if pgid >= n.bucket.tx.meta.pgid { - panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", pgid, n.bucket.tx.meta.pgid)) - } else if len(oldKey) <= 0 { - panic("put: zero-length old key") - } else if len(newKey) <= 0 { - panic("put: zero-length new key") - } - - // Find insertion index. - index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 }) - - // Add capacity and shift nodes if we don't have an exact match and need to insert. - exact := (len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].key, oldKey)) - if !exact { - n.inodes = append(n.inodes, inode{}) - copy(n.inodes[index+1:], n.inodes[index:]) - } - - inode := &n.inodes[index] - inode.flags = flags - inode.key = newKey - inode.value = value - inode.pgid = pgid - _assert(len(inode.key) > 0, "put: zero-length inode key") -} - -// del removes a key from the node. -func (n *node) del(key []byte) { - // Find index of key. - index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, key) != -1 }) - - // Exit if the key isn't found. - if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].key, key) { - return - } - - // Delete inode from the node. - n.inodes = append(n.inodes[:index], n.inodes[index+1:]...) - - // Mark the node as needing rebalancing. - n.unbalanced = true -} - -// read initializes the node from a page. -func (n *node) read(p *page) { - n.pgid = p.id - n.isLeaf = ((p.flags & leafPageFlag) != 0) - n.inodes = make(inodes, int(p.count)) - - for i := 0; i < int(p.count); i++ { - inode := &n.inodes[i] - if n.isLeaf { - elem := p.leafPageElement(uint16(i)) - inode.flags = elem.flags - inode.key = elem.key() - inode.value = elem.value() - } else { - elem := p.branchPageElement(uint16(i)) - inode.pgid = elem.pgid - inode.key = elem.key() - } - _assert(len(inode.key) > 0, "read: zero-length inode key") - } - - // Save first key so we can find the node in the parent when we spill. - if len(n.inodes) > 0 { - n.key = n.inodes[0].key - _assert(len(n.key) > 0, "read: zero-length node key") - } else { - n.key = nil - } -} - -// write writes the items onto one or more pages. -func (n *node) write(p *page) { - // Initialize page. - if n.isLeaf { - p.flags |= leafPageFlag - } else { - p.flags |= branchPageFlag - } - - if len(n.inodes) >= 0xFFFF { - panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.id)) - } - p.count = uint16(len(n.inodes)) - - // Stop here if there are no items to write. - if p.count == 0 { - return - } - - // Loop over each item and write it to the page. - b := (*[maxAllocSize]byte)(unsafe.Pointer(&p.ptr))[n.pageElementSize()*len(n.inodes):] - for i, item := range n.inodes { - _assert(len(item.key) > 0, "write: zero-length inode key") - - // Write the page element. 
- if n.isLeaf { - elem := p.leafPageElement(uint16(i)) - elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) - elem.flags = item.flags - elem.ksize = uint32(len(item.key)) - elem.vsize = uint32(len(item.value)) - } else { - elem := p.branchPageElement(uint16(i)) - elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) - elem.ksize = uint32(len(item.key)) - elem.pgid = item.pgid - _assert(elem.pgid != p.id, "write: circular dependency occurred") - } - - // If the length of key+value is larger than the max allocation size - // then we need to reallocate the byte array pointer. - // - // See: https://github.com/boltdb/bolt/pull/335 - klen, vlen := len(item.key), len(item.value) - if len(b) < klen+vlen { - b = (*[maxAllocSize]byte)(unsafe.Pointer(&b[0]))[:] - } - - // Write data for the element to the end of the page. - copy(b[0:], item.key) - b = b[klen:] - copy(b[0:], item.value) - b = b[vlen:] - } - - // DEBUG ONLY: n.dump() -} - -// split breaks up a node into multiple smaller nodes, if appropriate. -// This should only be called from the spill() function. -func (n *node) split(pageSize int) []*node { - var nodes []*node - - node := n - for { - // Split node into two. - a, b := node.splitTwo(pageSize) - nodes = append(nodes, a) - - // If we can't split then exit the loop. - if b == nil { - break - } - - // Set node to b so it gets split on the next iteration. - node = b - } - - return nodes -} - -// splitTwo breaks up a node into two smaller nodes, if appropriate. -// This should only be called from the split() function. -func (n *node) splitTwo(pageSize int) (*node, *node) { - // Ignore the split if the page doesn't have at least enough nodes for - // two pages or if the nodes can fit in a single page. - if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) { - return n, nil - } - - // Determine the threshold before starting a new node. - var fillPercent = n.bucket.FillPercent - if fillPercent < minFillPercent { - fillPercent = minFillPercent - } else if fillPercent > maxFillPercent { - fillPercent = maxFillPercent - } - threshold := int(float64(pageSize) * fillPercent) - - // Determine split position and sizes of the two pages. - splitIndex, _ := n.splitIndex(threshold) - - // Split node into two separate nodes. - // If there's no parent then we'll need to create one. - if n.parent == nil { - n.parent = &node{bucket: n.bucket, children: []*node{n}} - } - - // Create a new node and add it to the parent. - next := &node{bucket: n.bucket, isLeaf: n.isLeaf, parent: n.parent} - n.parent.children = append(n.parent.children, next) - - // Split inodes across two nodes. - next.inodes = n.inodes[splitIndex:] - n.inodes = n.inodes[:splitIndex] - - // Update the statistics. - n.bucket.tx.stats.Split++ - - return n, next -} - -// splitIndex finds the position where a page will fill a given threshold. -// It returns the index as well as the size of the first page. -// This is only be called from split(). -func (n *node) splitIndex(threshold int) (index, sz int) { - sz = pageHeaderSize - - // Loop until we only have the minimum number of keys required for the second page. - for i := 0; i < len(n.inodes)-minKeysPerPage; i++ { - index = i - inode := n.inodes[i] - elsize := n.pageElementSize() + len(inode.key) + len(inode.value) - - // If we have at least the minimum number of keys and adding another - // node would put us over the threshold then exit and return. 
- if i >= minKeysPerPage && sz+elsize > threshold { - break - } - - // Add the element size to the total size. - sz += elsize - } - - return -} - -// spill writes the nodes to dirty pages and splits nodes as it goes. -// Returns an error if dirty pages cannot be allocated. -func (n *node) spill() error { - var tx = n.bucket.tx - if n.spilled { - return nil - } - - // Spill child nodes first. Child nodes can materialize sibling nodes in - // the case of split-merge so we cannot use a range loop. We have to check - // the children size on every loop iteration. - sort.Sort(n.children) - for i := 0; i < len(n.children); i++ { - if err := n.children[i].spill(); err != nil { - return err - } - } - - // We no longer need the child list because it's only used for spill tracking. - n.children = nil - - // Split nodes into appropriate sizes. The first node will always be n. - var nodes = n.split(tx.db.pageSize) - for _, node := range nodes { - // Add node's page to the freelist if it's not new. - if node.pgid > 0 { - tx.db.freelist.free(tx.meta.txid, tx.page(node.pgid)) - node.pgid = 0 - } - - // Allocate contiguous space for the node. - p, err := tx.allocate((node.size() / tx.db.pageSize) + 1) - if err != nil { - return err - } - - // Write the node. - if p.id >= tx.meta.pgid { - panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid)) - } - node.pgid = p.id - node.write(p) - node.spilled = true - - // Insert into parent inodes. - if node.parent != nil { - var key = node.key - if key == nil { - key = node.inodes[0].key - } - - node.parent.put(key, node.inodes[0].key, nil, node.pgid, 0) - node.key = node.inodes[0].key - _assert(len(node.key) > 0, "spill: zero-length node key") - } - - // Update the statistics. - tx.stats.Spill++ - } - - // If the root node split and created a new root then we need to spill that - // as well. We'll clear out the children to make sure it doesn't try to respill. - if n.parent != nil && n.parent.pgid == 0 { - n.children = nil - return n.parent.spill() - } - - return nil -} - -// rebalance attempts to combine the node with sibling nodes if the node fill -// size is below a threshold or if there are not enough keys. -func (n *node) rebalance() { - if !n.unbalanced { - return - } - n.unbalanced = false - - // Update statistics. - n.bucket.tx.stats.Rebalance++ - - // Ignore if node is above threshold (25%) and has enough keys. - var threshold = n.bucket.tx.db.pageSize / 4 - if n.size() > threshold && len(n.inodes) > n.minKeys() { - return - } - - // Root node has special handling. - if n.parent == nil { - // If root node is a branch and only has one node then collapse it. - if !n.isLeaf && len(n.inodes) == 1 { - // Move root's child up. - child := n.bucket.node(n.inodes[0].pgid, n) - n.isLeaf = child.isLeaf - n.inodes = child.inodes[:] - n.children = child.children - - // Reparent all child nodes being moved. - for _, inode := range n.inodes { - if child, ok := n.bucket.nodes[inode.pgid]; ok { - child.parent = n - } - } - - // Remove old child. - child.parent = nil - delete(n.bucket.nodes, child.pgid) - child.free() - } - - return - } - - // If node has no keys then just remove it. - if n.numChildren() == 0 { - n.parent.del(n.key) - n.parent.removeChild(n) - delete(n.bucket.nodes, n.pgid) - n.free() - n.parent.rebalance() - return - } - - _assert(n.parent.numChildren() > 1, "parent must have at least 2 children") - - // Destination node is right sibling if idx == 0, otherwise left sibling. 
- var target *node - var useNextSibling = (n.parent.childIndex(n) == 0) - if useNextSibling { - target = n.nextSibling() - } else { - target = n.prevSibling() - } - - // If both this node and the target node are too small then merge them. - if useNextSibling { - // Reparent all child nodes being moved. - for _, inode := range target.inodes { - if child, ok := n.bucket.nodes[inode.pgid]; ok { - child.parent.removeChild(child) - child.parent = n - child.parent.children = append(child.parent.children, child) - } - } - - // Copy over inodes from target and remove target. - n.inodes = append(n.inodes, target.inodes...) - n.parent.del(target.key) - n.parent.removeChild(target) - delete(n.bucket.nodes, target.pgid) - target.free() - } else { - // Reparent all child nodes being moved. - for _, inode := range n.inodes { - if child, ok := n.bucket.nodes[inode.pgid]; ok { - child.parent.removeChild(child) - child.parent = target - child.parent.children = append(child.parent.children, child) - } - } - - // Copy over inodes to target and remove node. - target.inodes = append(target.inodes, n.inodes...) - n.parent.del(n.key) - n.parent.removeChild(n) - delete(n.bucket.nodes, n.pgid) - n.free() - } - - // Either this node or the target node was deleted from the parent so rebalance it. - n.parent.rebalance() -} - -// removes a node from the list of in-memory children. -// This does not affect the inodes. -func (n *node) removeChild(target *node) { - for i, child := range n.children { - if child == target { - n.children = append(n.children[:i], n.children[i+1:]...) - return - } - } -} - -// dereference causes the node to copy all its inode key/value references to heap memory. -// This is required when the mmap is reallocated so inodes are not pointing to stale data. -func (n *node) dereference() { - if n.key != nil { - key := make([]byte, len(n.key)) - copy(key, n.key) - n.key = key - _assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node") - } - - for i := range n.inodes { - inode := &n.inodes[i] - - key := make([]byte, len(inode.key)) - copy(key, inode.key) - inode.key = key - _assert(len(inode.key) > 0, "dereference: zero-length inode key") - - value := make([]byte, len(inode.value)) - copy(value, inode.value) - inode.value = value - } - - // Recursively dereference children. - for _, child := range n.children { - child.dereference() - } - - // Update statistics. - n.bucket.tx.stats.NodeDeref++ -} - -// free adds the node's underlying page to the freelist. -func (n *node) free() { - if n.pgid != 0 { - n.bucket.tx.db.freelist.free(n.bucket.tx.meta.txid, n.bucket.tx.page(n.pgid)) - n.pgid = 0 - } -} - -// dump writes the contents of the node to STDERR for debugging purposes. -/* -func (n *node) dump() { - // Write node header. - var typ = "branch" - if n.isLeaf { - typ = "leaf" - } - warnf("[NODE %d {type=%s count=%d}]", n.pgid, typ, len(n.inodes)) - - // Write out abbreviated version of each item. 
- for _, item := range n.inodes { - if n.isLeaf { - if item.flags&bucketLeafFlag != 0 { - bucket := (*bucket)(unsafe.Pointer(&item.value[0])) - warnf("+L %08x -> (bucket root=%d)", trunc(item.key, 4), bucket.root) - } else { - warnf("+L %08x -> %08x", trunc(item.key, 4), trunc(item.value, 4)) - } - } else { - warnf("+B %08x -> pgid=%d", trunc(item.key, 4), item.pgid) - } - } - warn("") -} -*/ - -type nodes []*node - -func (s nodes) Len() int { return len(s) } -func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s nodes) Less(i, j int) bool { return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1 } - -// inode represents an internal node inside of a node. -// It can be used to point to elements in a page or point -// to an element which hasn't been added to a page yet. -type inode struct { - flags uint32 - pgid pgid - key []byte - value []byte -} - -type inodes []inode diff --git a/vendor/github.com/cockroachdb/cmux/.travis.yml b/vendor/github.com/cockroachdb/cmux/.travis.yml index 9343f8d2c9d7..e73780f2eb0b 100644 --- a/vendor/github.com/cockroachdb/cmux/.travis.yml +++ b/vendor/github.com/cockroachdb/cmux/.travis.yml @@ -1,29 +1,22 @@ language: go go: + - 1.3 + - 1.4 - 1.5 - 1.6 - - 1.7 - - tip - -matrix: - allow_failures: - - go: tip gobuild_args: -race before_install: - - if [[ $TRAVIS_GO_VERSION == 1.7* ]]; then go get -u github.com/kisielk/errcheck; fi - - if [[ $TRAVIS_GO_VERSION == 1.7* ]]; then go get -u github.com/golang/lint/golint; fi + - go get -u github.com/golang/lint/golint + - if [[ $TRAVIS_GO_VERSION == 1.5* ]]; then go get -u github.com/kisielk/errcheck; fi + - go get -u golang.org/x/tools/cmd/vet before_script: - '! gofmt -s -l . | read' + - golint ./... - echo $TRAVIS_GO_VERSION - - if [[ $TRAVIS_GO_VERSION == 1.7* ]]; then golint ./...; fi - - if [[ $TRAVIS_GO_VERSION == 1.7* ]]; then errcheck ./...; fi - - if [[ $TRAVIS_GO_VERSION == 1.7* ]]; then go tool vet .; fi - - if [[ $TRAVIS_GO_VERSION == 1.7* ]]; then go tool vet --shadow .; fi - -script: - - go test -bench . -v ./... - - go test -race -bench . -v ./... + - if [[ $TRAVIS_GO_VERSION == 1.5* ]]; then errcheck ./...; fi + - go vet . + - go tool vet --shadow . diff --git a/vendor/github.com/cockroachdb/cmux/CONTRIBUTORS b/vendor/github.com/cockroachdb/cmux/CONTRIBUTORS deleted file mode 100644 index 1b73178547c1..000000000000 --- a/vendor/github.com/cockroachdb/cmux/CONTRIBUTORS +++ /dev/null @@ -1,11 +0,0 @@ -# The list of people who have contributed code to the cmux repository. -# -# Auto-generated with: -# git log --oneline --pretty=format:'%an <%aE>' | sort -u -# -Dmitri Shuralyov -Ethan Mosbaugh -Soheil Hassas Yeganeh -Soheil Hassas Yeganeh -Tamir Duberstein -Tamir Duberstein diff --git a/vendor/github.com/cockroachdb/cmux/README.md b/vendor/github.com/cockroachdb/cmux/README.md index 26e1737d731e..b3713da5876a 100644 --- a/vendor/github.com/cockroachdb/cmux/README.md +++ b/vendor/github.com/cockroachdb/cmux/README.md @@ -63,10 +63,3 @@ example of this approach. when it's accepted. For example, one connection can be either gRPC or REST, but not both. That is, we assume that a client connection is either used for gRPC or REST. - -# Copyright and License -Copyright 2016 The CMux Authors. All rights reserved. - -See [CONTRIBUTORS](https://github.com/cockroachdb/cmux/blob/master/CONTRIBUTORS) -for the CMux Authors. Code is released under -[the Apache 2 license](https://github.com/cockroachdb/cmux/blob/master/LICENSE). 
diff --git a/vendor/github.com/cockroachdb/cmux/buffer.go b/vendor/github.com/cockroachdb/cmux/buffer.go index dc4d9921f01d..5c1785853634 100644 --- a/vendor/github.com/cockroachdb/cmux/buffer.go +++ b/vendor/github.com/cockroachdb/cmux/buffer.go @@ -1,17 +1,3 @@ -// Copyright 2016 The CMux Authors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. See the License for the specific language governing -// permissions and limitations under the License. - package cmux import ( diff --git a/vendor/github.com/cockroachdb/cmux/cmux.go b/vendor/github.com/cockroachdb/cmux/cmux.go index f9787fdfdca7..89cc910b024e 100644 --- a/vendor/github.com/cockroachdb/cmux/cmux.go +++ b/vendor/github.com/cockroachdb/cmux/cmux.go @@ -1,17 +1,3 @@ -// Copyright 2016 The CMux Authors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. See the License for the specific language governing -// permissions and limitations under the License. - package cmux import ( diff --git a/vendor/github.com/cockroachdb/cmux/doc.go b/vendor/github.com/cockroachdb/cmux/doc.go deleted file mode 100644 index aaa8f3158998..000000000000 --- a/vendor/github.com/cockroachdb/cmux/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2016 The CMux Authors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. See the License for the specific language governing -// permissions and limitations under the License. - -// Package cmux is a library to multiplex network connections based on -// their payload. Using cmux, you can serve different protocols from the -// same listener. -package cmux diff --git a/vendor/github.com/cockroachdb/cmux/matchers.go b/vendor/github.com/cockroachdb/cmux/matchers.go index 939909818934..abc30f6e0ad0 100644 --- a/vendor/github.com/cockroachdb/cmux/matchers.go +++ b/vendor/github.com/cockroachdb/cmux/matchers.go @@ -1,17 +1,3 @@ -// Copyright 2016 The CMux Authors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. See the License for the specific language governing -// permissions and limitations under the License. - package cmux import ( diff --git a/vendor/github.com/cockroachdb/cmux/patricia.go b/vendor/github.com/cockroachdb/cmux/patricia.go index c3e3d85bdeaf..56ec4e7b287c 100644 --- a/vendor/github.com/cockroachdb/cmux/patricia.go +++ b/vendor/github.com/cockroachdb/cmux/patricia.go @@ -1,17 +1,3 @@ -// Copyright 2016 The CMux Authors. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -// implied. See the License for the specific language governing -// permissions and limitations under the License. - package cmux import ( @@ -22,20 +8,12 @@ import ( // patriciaTree is a simple patricia tree that handles []byte instead of string // and cannot be changed after instantiation. type patriciaTree struct { - root *ptNode - maxDepth int // max depth of the tree. + root *ptNode } -func newPatriciaTree(bs ...[]byte) *patriciaTree { - max := 0 - for _, b := range bs { - if max < len(b) { - max = len(b) - } - } +func newPatriciaTree(b ...[]byte) *patriciaTree { return &patriciaTree{ - root: newNode(bs), - maxDepth: max + 1, + root: newNode(b), } } @@ -44,19 +22,17 @@ func newPatriciaTreeString(strs ...string) *patriciaTree { for i, s := range strs { b[i] = []byte(s) } - return newPatriciaTree(b...) 
+ return &patriciaTree{ + root: newNode(b), + } } func (t *patriciaTree) matchPrefix(r io.Reader) bool { - buf := make([]byte, t.maxDepth) - n, _ := io.ReadFull(r, buf) - return t.root.match(buf[:n], true) + return t.root.match(r, true) } func (t *patriciaTree) match(r io.Reader) bool { - buf := make([]byte, t.maxDepth) - n, _ := io.ReadFull(r, buf) - return t.root.match(buf[:n], false) + return t.root.match(r, false) } type ptNode struct { @@ -146,34 +122,52 @@ func splitPrefix(bss [][]byte) (prefix []byte, rest [][]byte) { return prefix, rest } -func (n *ptNode) match(b []byte, prefix bool) bool { - l := len(n.prefix) - if l > 0 { - if l > len(b) { - l = len(b) +func readBytes(r io.Reader, n int) (b []byte, err error) { + b = make([]byte, n) + o := 0 + for o < n { + nr, err := r.Read(b[o:]) + if err != nil && err != io.EOF { + return b, err } - if !bytes.Equal(b[:l], n.prefix) { + + o += nr + + if err == io.EOF { + break + } + } + return b[:o], nil +} + +func (n *ptNode) match(r io.Reader, prefix bool) bool { + if l := len(n.prefix); l > 0 { + b, err := readBytes(r, l) + if err != nil || len(b) != l || !bytes.Equal(b, n.prefix) { return false } } - if n.terminal && (prefix || len(n.prefix) == len(b)) { + if prefix && n.terminal { return true } - if l >= len(b) { - return false - } + b := make([]byte, 1) + for { + nr, err := r.Read(b) + if nr != 0 { + break + } - nextN, ok := n.next[b[l]] - if !ok { - return false - } + if err == io.EOF { + return n.terminal + } - if l == len(b) { - b = b[l:l] - } else { - b = b[l+1:] + if err != nil { + return false + } } - return nextN.match(b, prefix) + + nextN, ok := n.next[b[0]] + return ok && nextN.match(r, prefix) } diff --git a/vendor/github.com/container-storage-interface/spec/lib/go/csi/csi.pb.go b/vendor/github.com/container-storage-interface/spec/lib/go/csi/csi.pb.go deleted file mode 100644 index 6158a4870cc8..000000000000 --- a/vendor/github.com/container-storage-interface/spec/lib/go/csi/csi.pb.go +++ /dev/null @@ -1,2525 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: csi.proto - -/* -Package csi is a generated protocol buffer package. - -It is generated from these files: - csi.proto - -It has these top-level messages: - GetSupportedVersionsRequest - GetSupportedVersionsResponse - Version - GetPluginInfoRequest - GetPluginInfoResponse - CreateVolumeRequest - CreateVolumeResponse - VolumeCapability - CapacityRange - VolumeInfo - DeleteVolumeRequest - DeleteVolumeResponse - ControllerPublishVolumeRequest - ControllerPublishVolumeResponse - ControllerUnpublishVolumeRequest - ControllerUnpublishVolumeResponse - ValidateVolumeCapabilitiesRequest - ValidateVolumeCapabilitiesResponse - ListVolumesRequest - ListVolumesResponse - GetCapacityRequest - GetCapacityResponse - ControllerProbeRequest - ControllerProbeResponse - ControllerGetCapabilitiesRequest - ControllerGetCapabilitiesResponse - ControllerServiceCapability - NodePublishVolumeRequest - NodePublishVolumeResponse - NodeUnpublishVolumeRequest - NodeUnpublishVolumeResponse - GetNodeIDRequest - GetNodeIDResponse - NodeProbeRequest - NodeProbeResponse - NodeGetCapabilitiesRequest - NodeGetCapabilitiesResponse - NodeServiceCapability -*/ -package csi - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -import ( - context "golang.org/x/net/context" - grpc "google.golang.org/grpc" -) - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type VolumeCapability_AccessMode_Mode int32 - -const ( - VolumeCapability_AccessMode_UNKNOWN VolumeCapability_AccessMode_Mode = 0 - // Can be published as read/write at one node at a time. - VolumeCapability_AccessMode_SINGLE_NODE_WRITER VolumeCapability_AccessMode_Mode = 1 - // Can be published as readonly at one node at a time. - VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY VolumeCapability_AccessMode_Mode = 2 - // Can be published as readonly at multiple nodes simultaneously. - VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY VolumeCapability_AccessMode_Mode = 3 - // Can be published at multiple nodes simultaneously. Only one of - // the node can be used as read/write. The rest will be readonly. - VolumeCapability_AccessMode_MULTI_NODE_SINGLE_WRITER VolumeCapability_AccessMode_Mode = 4 - // Can be published as read/write at multiple nodes - // simultaneously. - VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER VolumeCapability_AccessMode_Mode = 5 -) - -var VolumeCapability_AccessMode_Mode_name = map[int32]string{ - 0: "UNKNOWN", - 1: "SINGLE_NODE_WRITER", - 2: "SINGLE_NODE_READER_ONLY", - 3: "MULTI_NODE_READER_ONLY", - 4: "MULTI_NODE_SINGLE_WRITER", - 5: "MULTI_NODE_MULTI_WRITER", -} -var VolumeCapability_AccessMode_Mode_value = map[string]int32{ - "UNKNOWN": 0, - "SINGLE_NODE_WRITER": 1, - "SINGLE_NODE_READER_ONLY": 2, - "MULTI_NODE_READER_ONLY": 3, - "MULTI_NODE_SINGLE_WRITER": 4, - "MULTI_NODE_MULTI_WRITER": 5, -} - -func (x VolumeCapability_AccessMode_Mode) String() string { - return proto.EnumName(VolumeCapability_AccessMode_Mode_name, int32(x)) -} -func (VolumeCapability_AccessMode_Mode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor0, []int{7, 2, 0} -} - -type ControllerServiceCapability_RPC_Type int32 - -const ( - ControllerServiceCapability_RPC_UNKNOWN ControllerServiceCapability_RPC_Type = 0 - ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME ControllerServiceCapability_RPC_Type = 1 - ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME ControllerServiceCapability_RPC_Type = 2 - ControllerServiceCapability_RPC_LIST_VOLUMES ControllerServiceCapability_RPC_Type = 3 - ControllerServiceCapability_RPC_GET_CAPACITY ControllerServiceCapability_RPC_Type = 4 -) - -var ControllerServiceCapability_RPC_Type_name = map[int32]string{ - 0: "UNKNOWN", - 1: "CREATE_DELETE_VOLUME", - 2: "PUBLISH_UNPUBLISH_VOLUME", - 3: "LIST_VOLUMES", - 4: "GET_CAPACITY", -} -var ControllerServiceCapability_RPC_Type_value = map[string]int32{ - "UNKNOWN": 0, - "CREATE_DELETE_VOLUME": 1, - "PUBLISH_UNPUBLISH_VOLUME": 2, - "LIST_VOLUMES": 3, - "GET_CAPACITY": 4, -} - -func (x ControllerServiceCapability_RPC_Type) String() string { - return proto.EnumName(ControllerServiceCapability_RPC_Type_name, int32(x)) -} -func (ControllerServiceCapability_RPC_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor0, []int{26, 0, 0} -} - -type NodeServiceCapability_RPC_Type int32 - -const ( - NodeServiceCapability_RPC_UNKNOWN NodeServiceCapability_RPC_Type = 0 -) - -var NodeServiceCapability_RPC_Type_name = map[int32]string{ - 0: "UNKNOWN", -} -var NodeServiceCapability_RPC_Type_value = map[string]int32{ 
- "UNKNOWN": 0, -} - -func (x NodeServiceCapability_RPC_Type) String() string { - return proto.EnumName(NodeServiceCapability_RPC_Type_name, int32(x)) -} -func (NodeServiceCapability_RPC_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor0, []int{37, 0, 0} -} - -// ////// -// ////// -type GetSupportedVersionsRequest struct { -} - -func (m *GetSupportedVersionsRequest) Reset() { *m = GetSupportedVersionsRequest{} } -func (m *GetSupportedVersionsRequest) String() string { return proto.CompactTextString(m) } -func (*GetSupportedVersionsRequest) ProtoMessage() {} -func (*GetSupportedVersionsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } - -type GetSupportedVersionsResponse struct { - // All the CSI versions that the Plugin supports. This field is - // REQUIRED. - SupportedVersions []*Version `protobuf:"bytes,1,rep,name=supported_versions,json=supportedVersions" json:"supported_versions,omitempty"` -} - -func (m *GetSupportedVersionsResponse) Reset() { *m = GetSupportedVersionsResponse{} } -func (m *GetSupportedVersionsResponse) String() string { return proto.CompactTextString(m) } -func (*GetSupportedVersionsResponse) ProtoMessage() {} -func (*GetSupportedVersionsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } - -func (m *GetSupportedVersionsResponse) GetSupportedVersions() []*Version { - if m != nil { - return m.SupportedVersions - } - return nil -} - -// Specifies a version in Semantic Version 2.0 format. -// (http://semver.org/spec/v2.0.0.html) -type Version struct { - Major uint32 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"` - Minor uint32 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"` - Patch uint32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"` -} - -func (m *Version) Reset() { *m = Version{} } -func (m *Version) String() string { return proto.CompactTextString(m) } -func (*Version) ProtoMessage() {} -func (*Version) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } - -func (m *Version) GetMajor() uint32 { - if m != nil { - return m.Major - } - return 0 -} - -func (m *Version) GetMinor() uint32 { - if m != nil { - return m.Minor - } - return 0 -} - -func (m *Version) GetPatch() uint32 { - if m != nil { - return m.Patch - } - return 0 -} - -// ////// -// ////// -type GetPluginInfoRequest struct { - // The API version assumed by the CO. This is a REQUIRED field. - Version *Version `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"` -} - -func (m *GetPluginInfoRequest) Reset() { *m = GetPluginInfoRequest{} } -func (m *GetPluginInfoRequest) String() string { return proto.CompactTextString(m) } -func (*GetPluginInfoRequest) ProtoMessage() {} -func (*GetPluginInfoRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } - -func (m *GetPluginInfoRequest) GetVersion() *Version { - if m != nil { - return m.Version - } - return nil -} - -type GetPluginInfoResponse struct { - // The name MUST follow reverse domain name notation format - // (https://en.wikipedia.org/wiki/Reverse_domain_name_notation). - // It SHOULD include the plugin's host company name and the plugin - // name, to minimize the possibility of collisions. It MUST be 63 - // characters or less, beginning and ending with an alphanumeric - // character ([a-z0-9A-Z]) with dashes (-), underscores (_), - // dots (.), and alphanumerics between. This field is REQUIRED. - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // This field is REQUIRED. 
-
-// //////
-// //////
-type GetPluginInfoRequest struct {
-    // The API version assumed by the CO. This is a REQUIRED field.
-    Version *Version `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"`
-}
-
-func (m *GetPluginInfoRequest) Reset() { *m = GetPluginInfoRequest{} }
-func (m *GetPluginInfoRequest) String() string { return proto.CompactTextString(m) }
-func (*GetPluginInfoRequest) ProtoMessage() {}
-func (*GetPluginInfoRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
-
-func (m *GetPluginInfoRequest) GetVersion() *Version {
-    if m != nil {
-        return m.Version
-    }
-    return nil
-}
-
-type GetPluginInfoResponse struct {
-    // The name MUST follow reverse domain name notation format
-    // (https://en.wikipedia.org/wiki/Reverse_domain_name_notation).
-    // It SHOULD include the plugin's host company name and the plugin
-    // name, to minimize the possibility of collisions. It MUST be 63
-    // characters or less, beginning and ending with an alphanumeric
-    // character ([a-z0-9A-Z]) with dashes (-), underscores (_),
-    // dots (.), and alphanumerics between. This field is REQUIRED.
-    Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
-    // This field is REQUIRED. Value of this field is opaque to the CO.
-    VendorVersion string `protobuf:"bytes,2,opt,name=vendor_version,json=vendorVersion" json:"vendor_version,omitempty"`
-    // This field is OPTIONAL. Values are opaque to the CO.
-    Manifest map[string]string `protobuf:"bytes,3,rep,name=manifest" json:"manifest,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
-}
-
-func (m *GetPluginInfoResponse) Reset() { *m = GetPluginInfoResponse{} }
-func (m *GetPluginInfoResponse) String() string { return proto.CompactTextString(m) }
-func (*GetPluginInfoResponse) ProtoMessage() {}
-func (*GetPluginInfoResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
-
-func (m *GetPluginInfoResponse) GetName() string {
-    if m != nil {
-        return m.Name
-    }
-    return ""
-}
-
-func (m *GetPluginInfoResponse) GetVendorVersion() string {
-    if m != nil {
-        return m.VendorVersion
-    }
-    return ""
-}
-
-func (m *GetPluginInfoResponse) GetManifest() map[string]string {
-    if m != nil {
-        return m.Manifest
-    }
-    return nil
-}
-
-// //////
-// //////
-type CreateVolumeRequest struct {
-    // The API version assumed by the CO. This field is REQUIRED.
-    Version *Version `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"`
-    // The suggested name for the storage space. This field is REQUIRED.
-    // It serves two purposes:
-    // 1) Idempotency - This name is generated by the CO to achieve
-    // idempotency. If `CreateVolume` fails, the volume may or may not
-    // be provisioned. In this case, the CO may call `CreateVolume`
-    // again, with the same name, to ensure the volume exists. The
-    // Plugin should ensure that multiple `CreateVolume` calls for the
-    // same name do not result in more than one piece of storage
-    // provisioned corresponding to that name. If a Plugin is unable to
-    // enforce idempotency, the CO's error recovery logic could result
-    // in multiple (unused) volumes being provisioned.
-    // 2) Suggested name - Some storage systems allow callers to specify
-    // an identifier by which to refer to the newly provisioned
-    // storage. If a storage system supports this, it can optionally
-    // use this name as the identifier for the new volume.
-    Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"`
-    // This field is OPTIONAL. This allows the CO to specify the capacity
-    // requirement of the volume to be provisioned. If not specified, the
-    // Plugin MAY choose an implementation-defined capacity range.
-    CapacityRange *CapacityRange `protobuf:"bytes,3,opt,name=capacity_range,json=capacityRange" json:"capacity_range,omitempty"`
-    // The capabilities that the provisioned volume MUST have: the Plugin
-    // MUST provision a volume that could satisfy ALL of the
-    // capabilities specified in this list. The Plugin MUST assume that
-    // the CO MAY use the provisioned volume later with ANY of the
-    // capabilities specified in this list. This also enables the CO to do
-    // early validation: if ANY of the specified volume capabilities are
-    // not supported by the Plugin, the call SHALL fail. This field is
-    // REQUIRED.
-    VolumeCapabilities []*VolumeCapability `protobuf:"bytes,4,rep,name=volume_capabilities,json=volumeCapabilities" json:"volume_capabilities,omitempty"`
-    // Plugin specific parameters passed in as opaque key-value pairs.
-    // This field is OPTIONAL. The Plugin is responsible for parsing and
-    // validating these parameters. COs will treat these as opaque.
-    Parameters map[string]string `protobuf:"bytes,5,rep,name=parameters" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
-    // End user credentials used to authenticate/authorize volume creation
-    // request.
-    // This field contains credential data, for example username and
-    // password. Each key must consist of alphanumeric characters, '-',
-    // '_' or '.'. Each value MUST contain a valid string. An SP MAY
-    // choose to accept binary (non-string) data by using a binary-to-text
-    // encoding scheme, like base64. An SP SHALL advertise the
-    // requirements for credentials in documentation. COs SHALL permit
-    // users to pass through the required credentials. This information is
-    // sensitive and MUST be treated as such (not logged, etc.) by the CO.
-    // This field is OPTIONAL.
-    UserCredentials map[string]string `protobuf:"bytes,6,rep,name=user_credentials,json=userCredentials" json:"user_credentials,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
-}
-
-func (m *CreateVolumeRequest) Reset() { *m = CreateVolumeRequest{} }
-func (m *CreateVolumeRequest) String() string { return proto.CompactTextString(m) }
-func (*CreateVolumeRequest) ProtoMessage() {}
-func (*CreateVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
-
-func (m *CreateVolumeRequest) GetVersion() *Version {
-    if m != nil {
-        return m.Version
-    }
-    return nil
-}
-
-func (m *CreateVolumeRequest) GetName() string {
-    if m != nil {
-        return m.Name
-    }
-    return ""
-}
-
-func (m *CreateVolumeRequest) GetCapacityRange() *CapacityRange {
-    if m != nil {
-        return m.CapacityRange
-    }
-    return nil
-}
-
-func (m *CreateVolumeRequest) GetVolumeCapabilities() []*VolumeCapability {
-    if m != nil {
-        return m.VolumeCapabilities
-    }
-    return nil
-}
-
-func (m *CreateVolumeRequest) GetParameters() map[string]string {
-    if m != nil {
-        return m.Parameters
-    }
-    return nil
-}
-
-func (m *CreateVolumeRequest) GetUserCredentials() map[string]string {
-    if m != nil {
-        return m.UserCredentials
-    }
-    return nil
-}
-
-type CreateVolumeResponse struct {
-    // Contains all attributes of the newly created volume that are
-    // relevant to the CO along with information required by the Plugin
-    // to uniquely identify the volume. This field is REQUIRED.
-    VolumeInfo *VolumeInfo `protobuf:"bytes,1,opt,name=volume_info,json=volumeInfo" json:"volume_info,omitempty"`
-}
-
-func (m *CreateVolumeResponse) Reset() { *m = CreateVolumeResponse{} }
-func (m *CreateVolumeResponse) String() string { return proto.CompactTextString(m) }
-func (*CreateVolumeResponse) ProtoMessage() {}
-func (*CreateVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
-
-func (m *CreateVolumeResponse) GetVolumeInfo() *VolumeInfo {
-    if m != nil {
-        return m.VolumeInfo
-    }
-    return nil
-}
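
Editor's sketch (not part of the upstream diff): a CO-side `CreateVolumeRequest` tying the pieces above together, requesting an exact size by setting `required_bytes` equal to `limit_bytes` and a single-writer mount capability. All concrete values (name, size, fs type, parameter keys) are invented for illustration.

    req := &csi.CreateVolumeRequest{
        Version: &csi.Version{Major: 0, Minor: 1, Patch: 0},
        Name:    "pvc-example", // CO-generated, used for idempotency
        CapacityRange: &csi.CapacityRange{
            RequiredBytes: 10 << 30, // equal fields request exactly 10 GiB
            LimitBytes:    10 << 30,
        },
        VolumeCapabilities: []*csi.VolumeCapability{{
            AccessType: &csi.VolumeCapability_Mount{
                Mount: &csi.VolumeCapability_MountVolume{FsType: "ext4"},
            },
            AccessMode: &csi.VolumeCapability_AccessMode{
                Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER,
            },
        }},
        Parameters: map[string]string{"type": "fast"}, // opaque to the CO
    }
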
-
-// Specify a capability of a volume.
-type VolumeCapability struct {
-    // Specifies what API the volume will be accessed using. One of the
-    // following fields MUST be specified.
-    //
-    // Types that are valid to be assigned to AccessType:
-    //  *VolumeCapability_Block
-    //  *VolumeCapability_Mount
-    AccessType isVolumeCapability_AccessType `protobuf_oneof:"access_type"`
-    // This is a REQUIRED field.
-    AccessMode *VolumeCapability_AccessMode `protobuf:"bytes,3,opt,name=access_mode,json=accessMode" json:"access_mode,omitempty"`
-}
-
-func (m *VolumeCapability) Reset() { *m = VolumeCapability{} }
-func (m *VolumeCapability) String() string { return proto.CompactTextString(m) }
-func (*VolumeCapability) ProtoMessage() {}
-func (*VolumeCapability) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
-
-type isVolumeCapability_AccessType interface {
-    isVolumeCapability_AccessType()
-}
-
-type VolumeCapability_Block struct {
-    Block *VolumeCapability_BlockVolume `protobuf:"bytes,1,opt,name=block,oneof"`
-}
-type VolumeCapability_Mount struct {
-    Mount *VolumeCapability_MountVolume `protobuf:"bytes,2,opt,name=mount,oneof"`
-}
-
-func (*VolumeCapability_Block) isVolumeCapability_AccessType() {}
-func (*VolumeCapability_Mount) isVolumeCapability_AccessType() {}
-
-func (m *VolumeCapability) GetAccessType() isVolumeCapability_AccessType {
-    if m != nil {
-        return m.AccessType
-    }
-    return nil
-}
-
-func (m *VolumeCapability) GetBlock() *VolumeCapability_BlockVolume {
-    if x, ok := m.GetAccessType().(*VolumeCapability_Block); ok {
-        return x.Block
-    }
-    return nil
-}
-
-func (m *VolumeCapability) GetMount() *VolumeCapability_MountVolume {
-    if x, ok := m.GetAccessType().(*VolumeCapability_Mount); ok {
-        return x.Mount
-    }
-    return nil
-}
-
-func (m *VolumeCapability) GetAccessMode() *VolumeCapability_AccessMode {
-    if m != nil {
-        return m.AccessMode
-    }
-    return nil
-}
-
-// XXX_OneofFuncs is for the internal use of the proto package.
-func (*VolumeCapability) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
-    return _VolumeCapability_OneofMarshaler, _VolumeCapability_OneofUnmarshaler, _VolumeCapability_OneofSizer, []interface{}{
-        (*VolumeCapability_Block)(nil),
-        (*VolumeCapability_Mount)(nil),
-    }
-}
-
-func _VolumeCapability_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
-    m := msg.(*VolumeCapability)
-    // access_type
-    switch x := m.AccessType.(type) {
-    case *VolumeCapability_Block:
-        b.EncodeVarint(1<<3 | proto.WireBytes)
-        if err := b.EncodeMessage(x.Block); err != nil {
-            return err
-        }
-    case *VolumeCapability_Mount:
-        b.EncodeVarint(2<<3 | proto.WireBytes)
-        if err := b.EncodeMessage(x.Mount); err != nil {
-            return err
-        }
-    case nil:
-    default:
-        return fmt.Errorf("VolumeCapability.AccessType has unexpected type %T", x)
-    }
-    return nil
-}
-
-func _VolumeCapability_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
-    m := msg.(*VolumeCapability)
-    switch tag {
-    case 1: // access_type.block
-        if wire != proto.WireBytes {
-            return true, proto.ErrInternalBadWireType
-        }
-        msg := new(VolumeCapability_BlockVolume)
-        err := b.DecodeMessage(msg)
-        m.AccessType = &VolumeCapability_Block{msg}
-        return true, err
-    case 2: // access_type.mount
-        if wire != proto.WireBytes {
-            return true, proto.ErrInternalBadWireType
-        }
-        msg := new(VolumeCapability_MountVolume)
-        err := b.DecodeMessage(msg)
-        m.AccessType = &VolumeCapability_Mount{msg}
-        return true, err
-    default:
-        return false, nil
-    }
-}
-
-func _VolumeCapability_OneofSizer(msg proto.Message) (n int) {
-    m := msg.(*VolumeCapability)
-    // access_type
-    switch x := m.AccessType.(type) {
-    case *VolumeCapability_Block:
-        s := proto.Size(x.Block)
-        n += proto.SizeVarint(1<<3 | proto.WireBytes)
-        n += proto.SizeVarint(uint64(s))
-        n += s
-    case *VolumeCapability_Mount:
-        s := proto.Size(x.Mount)
-        n += proto.SizeVarint(2<<3 | proto.WireBytes)
-        n += proto.SizeVarint(uint64(s))
-        n += s
-    case nil:
-    default:
-        panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
-    }
-    return n
-}
-
-// Indicate that the volume will be accessed via the block device API.
-type VolumeCapability_BlockVolume struct {
-}
-
-func (m *VolumeCapability_BlockVolume) Reset() { *m = VolumeCapability_BlockVolume{} }
-func (m *VolumeCapability_BlockVolume) String() string { return proto.CompactTextString(m) }
-func (*VolumeCapability_BlockVolume) ProtoMessage() {}
-func (*VolumeCapability_BlockVolume) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7, 0} }
-
-// Indicate that the volume will be accessed via the filesystem API.
-type VolumeCapability_MountVolume struct {
-    // The filesystem type. This field is OPTIONAL.
-    // An empty string is equal to an unspecified field value.
-    FsType string `protobuf:"bytes,1,opt,name=fs_type,json=fsType" json:"fs_type,omitempty"`
-    // The mount options that can be used for the volume. This field is
-    // OPTIONAL. `mount_flags` MAY contain sensitive information.
-    // Therefore, the CO and the Plugin MUST NOT leak this information
-    // to untrusted entities. The total size of this repeated field
-    // SHALL NOT exceed 4 KiB.
-    MountFlags []string `protobuf:"bytes,2,rep,name=mount_flags,json=mountFlags" json:"mount_flags,omitempty"`
-}
-
-func (m *VolumeCapability_MountVolume) Reset() { *m = VolumeCapability_MountVolume{} }
-func (m *VolumeCapability_MountVolume) String() string { return proto.CompactTextString(m) }
-func (*VolumeCapability_MountVolume) ProtoMessage() {}
-func (*VolumeCapability_MountVolume) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7, 1} }
-
-func (m *VolumeCapability_MountVolume) GetFsType() string {
-    if m != nil {
-        return m.FsType
-    }
-    return ""
-}
-
-func (m *VolumeCapability_MountVolume) GetMountFlags() []string {
-    if m != nil {
-        return m.MountFlags
-    }
-    return nil
-}
-
-// Specify how a volume can be accessed.
-type VolumeCapability_AccessMode struct {
-    // This field is REQUIRED.
-    Mode VolumeCapability_AccessMode_Mode `protobuf:"varint,1,opt,name=mode,enum=csi.VolumeCapability_AccessMode_Mode" json:"mode,omitempty"`
-}
-
-func (m *VolumeCapability_AccessMode) Reset() { *m = VolumeCapability_AccessMode{} }
-func (m *VolumeCapability_AccessMode) String() string { return proto.CompactTextString(m) }
-func (*VolumeCapability_AccessMode) ProtoMessage() {}
-func (*VolumeCapability_AccessMode) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7, 2} }
-
-func (m *VolumeCapability_AccessMode) GetMode() VolumeCapability_AccessMode_Mode {
-    if m != nil {
-        return m.Mode
-    }
-    return VolumeCapability_AccessMode_UNKNOWN
-}
-
-// The capacity of the storage space in bytes. To specify an exact size,
-// `required_bytes` and `limit_bytes` can be set to the same value. At
-// least one of the these fields MUST be specified.
-type CapacityRange struct {
-    // Volume must be at least this big. This field is OPTIONAL.
-    // A value of 0 is equal to an unspecified field value.
-    RequiredBytes uint64 `protobuf:"varint,1,opt,name=required_bytes,json=requiredBytes" json:"required_bytes,omitempty"`
-    // Volume must not be bigger than this. This field is OPTIONAL.
-    // A value of 0 is equal to an unspecified field value.
-    LimitBytes uint64 `protobuf:"varint,2,opt,name=limit_bytes,json=limitBytes" json:"limit_bytes,omitempty"`
-}
-
-func (m *CapacityRange) Reset() { *m = CapacityRange{} }
-func (m *CapacityRange) String() string { return proto.CompactTextString(m) }
-func (*CapacityRange) ProtoMessage() {}
-func (*CapacityRange) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
-
-func (m *CapacityRange) GetRequiredBytes() uint64 {
-    if m != nil {
-        return m.RequiredBytes
-    }
-    return 0
-}
-
-func (m *CapacityRange) GetLimitBytes() uint64 {
-    if m != nil {
-        return m.LimitBytes
-    }
-    return 0
-}
-
-// The information about a provisioned volume.
-type VolumeInfo struct {
-    // The capacity of the volume in bytes. This field is OPTIONAL. If not
-    // set (value of 0), it indicates that the capacity of the volume is
-    // unknown (e.g., NFS share).
-    CapacityBytes uint64 `protobuf:"varint,1,opt,name=capacity_bytes,json=capacityBytes" json:"capacity_bytes,omitempty"`
-    // Contains identity information for the created volume. This field is
-    // REQUIRED. The identity information will be used by the CO in
-    // subsequent calls to refer to the provisioned volume.
-    Id string `protobuf:"bytes,2,opt,name=id" json:"id,omitempty"`
-    // Attributes reflect static properties of a volume and MUST be passed
-    // to volume validation and publishing calls.
-    // Attributes SHALL be opaque to a CO. Attributes SHALL NOT be mutable
-    // and SHALL be safe for the CO to cache. Attributes SHOULD NOT
-    // contain sensitive information. Attributes MAY NOT uniquely identify
-    // a volume. A volume uniquely identified by `id` SHALL always report
-    // the same attributes. This field is OPTIONAL and when present MUST
-    // be passed to volume validation and publishing calls.
-    Attributes map[string]string `protobuf:"bytes,3,rep,name=attributes" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
-}
-
-func (m *VolumeInfo) Reset() { *m = VolumeInfo{} }
-func (m *VolumeInfo) String() string { return proto.CompactTextString(m) }
-func (*VolumeInfo) ProtoMessage() {}
-func (*VolumeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
-
-func (m *VolumeInfo) GetCapacityBytes() uint64 {
-    if m != nil {
-        return m.CapacityBytes
-    }
-    return 0
-}
-
-func (m *VolumeInfo) GetId() string {
-    if m != nil {
-        return m.Id
-    }
-    return ""
-}
-
-func (m *VolumeInfo) GetAttributes() map[string]string {
-    if m != nil {
-        return m.Attributes
-    }
-    return nil
-}
-
-// //////
-// //////
-type DeleteVolumeRequest struct {
-    // The API version assumed by the CO. This field is REQUIRED.
-    Version *Version `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"`
-    // The ID of the volume to be deprovisioned.
-    // This field is REQUIRED.
-    VolumeId string `protobuf:"bytes,2,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
-    // End user credentials used to authenticate/authorize volume deletion
-    // request.
-    // This field contains credential data, for example username and
-    // password. Each key must consist of alphanumeric characters, '-',
-    // '_' or '.'. Each value MUST contain a valid string. An SP MAY
-    // choose to accept binary (non-string) data by using a binary-to-text
-    // encoding scheme, like base64. An SP SHALL advertise the
-    // requirements for credentials in documentation. COs SHALL permit
-    // users to pass through the required credentials. This information is
-    // sensitive and MUST be treated as such (not logged, etc.) by the CO.
-    // This field is OPTIONAL.
-    UserCredentials map[string]string `protobuf:"bytes,3,rep,name=user_credentials,json=userCredentials" json:"user_credentials,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
-}
-
-func (m *DeleteVolumeRequest) Reset() { *m = DeleteVolumeRequest{} }
-func (m *DeleteVolumeRequest) String() string { return proto.CompactTextString(m) }
-func (*DeleteVolumeRequest) ProtoMessage() {}
-func (*DeleteVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
-
-func (m *DeleteVolumeRequest) GetVersion() *Version {
-    if m != nil {
-        return m.Version
-    }
-    return nil
-}
-
-func (m *DeleteVolumeRequest) GetVolumeId() string {
-    if m != nil {
-        return m.VolumeId
-    }
-    return ""
-}
-
-func (m *DeleteVolumeRequest) GetUserCredentials() map[string]string {
-    if m != nil {
-        return m.UserCredentials
-    }
-    return nil
-}
-
-type DeleteVolumeResponse struct {
-}
-
-func (m *DeleteVolumeResponse) Reset() { *m = DeleteVolumeResponse{} }
-func (m *DeleteVolumeResponse) String() string { return proto.CompactTextString(m) }
-func (*DeleteVolumeResponse) ProtoMessage() {}
-func (*DeleteVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
-
-// //////
-// //////
-type ControllerPublishVolumeRequest struct {
-    // The API version assumed by the CO. This field is REQUIRED.
-    Version *Version `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"`
-    // The ID of the volume to be used on a node.
-    // This field is REQUIRED.
-    VolumeId string `protobuf:"bytes,2,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
-    // The ID of the node. This field is REQUIRED. The CO SHALL set this
-    // field to match the node ID returned by `GetNodeID`.
-    NodeId string `protobuf:"bytes,3,opt,name=node_id,json=nodeId" json:"node_id,omitempty"`
-    // The capability of the volume the CO expects the volume to have.
-    // This is a REQUIRED field.
-    VolumeCapability *VolumeCapability `protobuf:"bytes,4,opt,name=volume_capability,json=volumeCapability" json:"volume_capability,omitempty"`
-    // Whether to publish the volume in readonly mode. This field is
-    // REQUIRED.
-    Readonly bool `protobuf:"varint,5,opt,name=readonly" json:"readonly,omitempty"`
-    // End user credentials used to authenticate/authorize controller
-    // publish request.
-    // This field contains credential data, for example username and
-    // password. Each key must consist of alphanumeric characters, '-',
-    // '_' or '.'. Each value MUST contain a valid string. An SP MAY
-    // choose to accept binary (non-string) data by using a binary-to-text
-    // encoding scheme, like base64. An SP SHALL advertise the
-    // requirements for credentials in documentation. COs SHALL permit
-    // users to pass through the required credentials. This information is
-    // sensitive and MUST be treated as such (not logged, etc.) by the CO.
-    // This field is OPTIONAL.
-    UserCredentials map[string]string `protobuf:"bytes,6,rep,name=user_credentials,json=userCredentials" json:"user_credentials,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
-    // Attributes of the volume to be used on a node. This field is
-    // OPTIONAL and MUST match the attributes of the VolumeInfo identified
-    // by `volume_id`.
-    VolumeAttributes map[string]string `protobuf:"bytes,7,rep,name=volume_attributes,json=volumeAttributes" json:"volume_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
-}
-
-func (m *ControllerPublishVolumeRequest) Reset() { *m = ControllerPublishVolumeRequest{} }
-func (m *ControllerPublishVolumeRequest) String() string { return proto.CompactTextString(m) }
-func (*ControllerPublishVolumeRequest) ProtoMessage() {}
-func (*ControllerPublishVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} }
-
-func (m *ControllerPublishVolumeRequest) GetVersion() *Version {
-    if m != nil {
-        return m.Version
-    }
-    return nil
-}
-
-func (m *ControllerPublishVolumeRequest) GetVolumeId() string {
-    if m != nil {
-        return m.VolumeId
-    }
-    return ""
-}
-
-func (m *ControllerPublishVolumeRequest) GetNodeId() string {
-    if m != nil {
-        return m.NodeId
-    }
-    return ""
-}
-
-func (m *ControllerPublishVolumeRequest) GetVolumeCapability() *VolumeCapability {
-    if m != nil {
-        return m.VolumeCapability
-    }
-    return nil
-}
-
-func (m *ControllerPublishVolumeRequest) GetReadonly() bool {
-    if m != nil {
-        return m.Readonly
-    }
-    return false
-}
-
-func (m *ControllerPublishVolumeRequest) GetUserCredentials() map[string]string {
-    if m != nil {
-        return m.UserCredentials
-    }
-    return nil
-}
-
-func (m *ControllerPublishVolumeRequest) GetVolumeAttributes() map[string]string {
-    if m != nil {
-        return m.VolumeAttributes
-    }
-    return nil
-}
-
-type ControllerPublishVolumeResponse struct {
-    // The SP specific information that will be passed to the Plugin in
-    // the subsequent `NodePublishVolume` call for the given volume.
-    // This information is opaque to the CO. This field is OPTIONAL.
-    PublishVolumeInfo map[string]string `protobuf:"bytes,1,rep,name=publish_volume_info,json=publishVolumeInfo" json:"publish_volume_info,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
-}
-
-func (m *ControllerPublishVolumeResponse) Reset() { *m = ControllerPublishVolumeResponse{} }
-func (m *ControllerPublishVolumeResponse) String() string { return proto.CompactTextString(m) }
-func (*ControllerPublishVolumeResponse) ProtoMessage() {}
-func (*ControllerPublishVolumeResponse) Descriptor() ([]byte, []int) {
-    return fileDescriptor0, []int{13}
-}
-
-func (m *ControllerPublishVolumeResponse) GetPublishVolumeInfo() map[string]string {
-    if m != nil {
-        return m.PublishVolumeInfo
-    }
-    return nil
-}
-
-// //////
-// //////
-type ControllerUnpublishVolumeRequest struct {
-    // The API version assumed by the CO. This field is REQUIRED.
-    Version *Version `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"`
-    // The ID of the volume. This field is REQUIRED.
-    VolumeId string `protobuf:"bytes,2,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
-    // The ID of the node. This field is OPTIONAL. The CO SHOULD set this
-    // field to match the node ID returned by `GetNodeID` or leave it
-    // unset. If the value is set, the SP MUST unpublish the volume from
-    // the specified node. If the value is unset, the SP MUST unpublish
-    // the volume from all nodes it is published to.
-    NodeId string `protobuf:"bytes,3,opt,name=node_id,json=nodeId" json:"node_id,omitempty"`
-    // End user credentials used to authenticate/authorize controller
-    // unpublish request.
-    // This field contains credential data, for example username and
-    // password. Each key must consist of alphanumeric characters, '-',
-    // '_' or '.'. Each value MUST contain a valid string. An SP MAY
-    // choose to accept binary (non-string) data by using a binary-to-text
-    // encoding scheme, like base64. An SP SHALL advertise the
-    // requirements for credentials in documentation. COs SHALL permit
-    // users to pass through the required credentials. This information is
-    // sensitive and MUST be treated as such (not logged, etc.) by the CO.
-    // This field is OPTIONAL.
-    UserCredentials map[string]string `protobuf:"bytes,4,rep,name=user_credentials,json=userCredentials" json:"user_credentials,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
-}
-
-func (m *ControllerUnpublishVolumeRequest) Reset() { *m = ControllerUnpublishVolumeRequest{} }
-func (m *ControllerUnpublishVolumeRequest) String() string { return proto.CompactTextString(m) }
-func (*ControllerUnpublishVolumeRequest) ProtoMessage() {}
-func (*ControllerUnpublishVolumeRequest) Descriptor() ([]byte, []int) {
-    return fileDescriptor0, []int{14}
-}
-
-func (m *ControllerUnpublishVolumeRequest) GetVersion() *Version {
-    if m != nil {
-        return m.Version
-    }
-    return nil
-}
-
-func (m *ControllerUnpublishVolumeRequest) GetVolumeId() string {
-    if m != nil {
-        return m.VolumeId
-    }
-    return ""
-}
-
-func (m *ControllerUnpublishVolumeRequest) GetNodeId() string {
-    if m != nil {
-        return m.NodeId
-    }
-    return ""
-}
-
-func (m *ControllerUnpublishVolumeRequest) GetUserCredentials() map[string]string {
-    if m != nil {
-        return m.UserCredentials
-    }
-    return nil
-}
-
-type ControllerUnpublishVolumeResponse struct {
-}
-
-func (m *ControllerUnpublishVolumeResponse) Reset() { *m = ControllerUnpublishVolumeResponse{} }
-func (m *ControllerUnpublishVolumeResponse) String() string { return proto.CompactTextString(m) }
-func (*ControllerUnpublishVolumeResponse) ProtoMessage() {}
-func (*ControllerUnpublishVolumeResponse) Descriptor() ([]byte, []int) {
-    return fileDescriptor0, []int{15}
-}
-
-// //////
-// //////
-type ValidateVolumeCapabilitiesRequest struct {
-    // The API version assumed by the CO. This is a REQUIRED field.
-    Version *Version `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"`
-    // The ID of the volume to check. This field is REQUIRED.
-    VolumeId string `protobuf:"bytes,2,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
-    // The capabilities that the CO wants to check for the volume. This
-    // call SHALL return "supported" only if all the volume capabilities
-    // specified below are supported. This field is REQUIRED.
-    VolumeCapabilities []*VolumeCapability `protobuf:"bytes,3,rep,name=volume_capabilities,json=volumeCapabilities" json:"volume_capabilities,omitempty"`
-    // Attributes of the volume to check. This field is OPTIONAL and MUST
-    // match the attributes of the VolumeInfo identified by `volume_id`.
-    VolumeAttributes map[string]string `protobuf:"bytes,4,rep,name=volume_attributes,json=volumeAttributes" json:"volume_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
-}
-
-func (m *ValidateVolumeCapabilitiesRequest) Reset() { *m = ValidateVolumeCapabilitiesRequest{} }
-func (m *ValidateVolumeCapabilitiesRequest) String() string { return proto.CompactTextString(m) }
-func (*ValidateVolumeCapabilitiesRequest) ProtoMessage() {}
-func (*ValidateVolumeCapabilitiesRequest) Descriptor() ([]byte, []int) {
-    return fileDescriptor0, []int{16}
-}
-
-func (m *ValidateVolumeCapabilitiesRequest) GetVersion() *Version {
-    if m != nil {
-        return m.Version
-    }
-    return nil
-}
-
-func (m *ValidateVolumeCapabilitiesRequest) GetVolumeId() string {
-    if m != nil {
-        return m.VolumeId
-    }
-    return ""
-}
-
-func (m *ValidateVolumeCapabilitiesRequest) GetVolumeCapabilities() []*VolumeCapability {
-    if m != nil {
-        return m.VolumeCapabilities
-    }
-    return nil
-}
-
-func (m *ValidateVolumeCapabilitiesRequest) GetVolumeAttributes() map[string]string {
-    if m != nil {
-        return m.VolumeAttributes
-    }
-    return nil
-}
-
-type ValidateVolumeCapabilitiesResponse struct {
-    // True if the Plugin supports the specified capabilities for the
-    // given volume. This field is REQUIRED.
-    Supported bool `protobuf:"varint,1,opt,name=supported" json:"supported,omitempty"`
-    // Message to the CO if `supported` above is false. This field is
-    // OPTIONAL.
-    // An empty string is equal to an unspecified field value.
-    Message string `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"`
-}
-
-func (m *ValidateVolumeCapabilitiesResponse) Reset() { *m = ValidateVolumeCapabilitiesResponse{} }
-func (m *ValidateVolumeCapabilitiesResponse) String() string { return proto.CompactTextString(m) }
-func (*ValidateVolumeCapabilitiesResponse) ProtoMessage() {}
-func (*ValidateVolumeCapabilitiesResponse) Descriptor() ([]byte, []int) {
-    return fileDescriptor0, []int{17}
-}
-
-func (m *ValidateVolumeCapabilitiesResponse) GetSupported() bool {
-    if m != nil {
-        return m.Supported
-    }
-    return false
-}
-
-func (m *ValidateVolumeCapabilitiesResponse) GetMessage() string {
-    if m != nil {
-        return m.Message
-    }
-    return ""
-}
-
-// //////
-// //////
-type ListVolumesRequest struct {
-    // The API version assumed by the CO. This field is REQUIRED.
-    Version *Version `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"`
-    // If specified (non-zero value), the Plugin MUST NOT return more
-    // entries than this number in the response. If the actual number of
-    // entries is more than this number, the Plugin MUST set `next_token`
-    // in the response which can be used to get the next page of entries
-    // in the subsequent `ListVolumes` call. This field is OPTIONAL. If
-    // not specified (zero value), it means there is no restriction on the
-    // number of entries that can be returned.
-    MaxEntries uint32 `protobuf:"varint,2,opt,name=max_entries,json=maxEntries" json:"max_entries,omitempty"`
-    // A token to specify where to start paginating. Set this field to
-    // `next_token` returned by a previous `ListVolumes` call to get the
-    // next page of entries. This field is OPTIONAL.
-    // An empty string is equal to an unspecified field value.
-    StartingToken string `protobuf:"bytes,3,opt,name=starting_token,json=startingToken" json:"starting_token,omitempty"`
-}
-
-func (m *ListVolumesRequest) Reset() { *m = ListVolumesRequest{} }
-func (m *ListVolumesRequest) String() string { return proto.CompactTextString(m) }
-func (*ListVolumesRequest) ProtoMessage() {}
-func (*ListVolumesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} }
-
-func (m *ListVolumesRequest) GetVersion() *Version {
-    if m != nil {
-        return m.Version
-    }
-    return nil
-}
-
-func (m *ListVolumesRequest) GetMaxEntries() uint32 {
-    if m != nil {
-        return m.MaxEntries
-    }
-    return 0
-}
-
-func (m *ListVolumesRequest) GetStartingToken() string {
-    if m != nil {
-        return m.StartingToken
-    }
-    return ""
-}
-
-type ListVolumesResponse struct {
-    Entries []*ListVolumesResponse_Entry `protobuf:"bytes,1,rep,name=entries" json:"entries,omitempty"`
-    // This token allows you to get the next page of entries for
-    // `ListVolumes` request. If the number of entries is larger than
-    // `max_entries`, use the `next_token` as a value for the
-    // `starting_token` field in the next `ListVolumes` request. This
-    // field is OPTIONAL.
-    // An empty string is equal to an unspecified field value.
-    NextToken string `protobuf:"bytes,2,opt,name=next_token,json=nextToken" json:"next_token,omitempty"`
-}
-
-func (m *ListVolumesResponse) Reset() { *m = ListVolumesResponse{} }
-func (m *ListVolumesResponse) String() string { return proto.CompactTextString(m) }
-func (*ListVolumesResponse) ProtoMessage() {}
-func (*ListVolumesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} }
-
-func (m *ListVolumesResponse) GetEntries() []*ListVolumesResponse_Entry {
-    if m != nil {
-        return m.Entries
-    }
-    return nil
-}
-
-func (m *ListVolumesResponse) GetNextToken() string {
-    if m != nil {
-        return m.NextToken
-    }
-    return ""
-}
-
-type ListVolumesResponse_Entry struct {
-    VolumeInfo *VolumeInfo `protobuf:"bytes,1,opt,name=volume_info,json=volumeInfo" json:"volume_info,omitempty"`
-}
-
-func (m *ListVolumesResponse_Entry) Reset() { *m = ListVolumesResponse_Entry{} }
-func (m *ListVolumesResponse_Entry) String() string { return proto.CompactTextString(m) }
-func (*ListVolumesResponse_Entry) ProtoMessage() {}
-func (*ListVolumesResponse_Entry) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19, 0} }
-
-func (m *ListVolumesResponse_Entry) GetVolumeInfo() *VolumeInfo {
-    if m != nil {
-        return m.VolumeInfo
-    }
-    return nil
-}
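
Editor's sketch (not part of the upstream diff): the `max_entries`/`next_token`/`starting_token` trio above implements cursor pagination. A paging loop might look like the following; it assumes the `ControllerClient` type that protoc-gen-go generates later in this same file, and the helper name is hypothetical.

    // assumes:
    //   import "golang.org/x/net/context"
    //   import csi "github.com/container-storage-interface/spec/lib/go/csi"
    func listAllVolumes(ctx context.Context, c csi.ControllerClient, v *csi.Version) ([]*csi.ListVolumesResponse_Entry, error) {
        var entries []*csi.ListVolumesResponse_Entry
        token := ""
        for {
            resp, err := c.ListVolumes(ctx, &csi.ListVolumesRequest{
                Version:       v,
                MaxEntries:    100, // plugin returns at most 100 entries per page
                StartingToken: token,
            })
            if err != nil {
                return nil, err
            }
            entries = append(entries, resp.GetEntries()...)
            token = resp.GetNextToken()
            if token == "" { // an empty next_token means no further pages
                return entries, nil
            }
        }
    }
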
-
-// //////
-// //////
-type GetCapacityRequest struct {
-    // The API version assumed by the CO. This is a REQUIRED field.
-    Version *Version `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"`
-    // If specified, the Plugin SHALL report the capacity of the storage
-    // that can be used to provision volumes that satisfy ALL of the
-    // specified `volume_capabilities`. These are the same
-    // `volume_capabilities` the CO will use in `CreateVolumeRequest`.
-    // This field is OPTIONAL.
-    VolumeCapabilities []*VolumeCapability `protobuf:"bytes,2,rep,name=volume_capabilities,json=volumeCapabilities" json:"volume_capabilities,omitempty"`
-    // If specified, the Plugin SHALL report the capacity of the storage
-    // that can be used to provision volumes with the given Plugin
-    // specific `parameters`. These are the same `parameters` the CO will
-    // use in `CreateVolumeRequest`. This field is OPTIONAL.
-    Parameters map[string]string `protobuf:"bytes,3,rep,name=parameters" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
-}
-
-func (m *GetCapacityRequest) Reset() { *m = GetCapacityRequest{} }
-func (m *GetCapacityRequest) String() string { return proto.CompactTextString(m) }
-func (*GetCapacityRequest) ProtoMessage() {}
-func (*GetCapacityRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} }
-
-func (m *GetCapacityRequest) GetVersion() *Version {
-    if m != nil {
-        return m.Version
-    }
-    return nil
-}
-
-func (m *GetCapacityRequest) GetVolumeCapabilities() []*VolumeCapability {
-    if m != nil {
-        return m.VolumeCapabilities
-    }
-    return nil
-}
-
-func (m *GetCapacityRequest) GetParameters() map[string]string {
-    if m != nil {
-        return m.Parameters
-    }
-    return nil
-}
-
-type GetCapacityResponse struct {
-    // The available capacity of the storage that can be used to
-    // provision volumes. If `volume_capabilities` or `parameters` is
-    // specified in the request, the Plugin SHALL take those into
-    // consideration when calculating the available capacity of the
-    // storage. This field is REQUIRED.
-    AvailableCapacity uint64 `protobuf:"varint,1,opt,name=available_capacity,json=availableCapacity" json:"available_capacity,omitempty"`
-}
-
-func (m *GetCapacityResponse) Reset() { *m = GetCapacityResponse{} }
-func (m *GetCapacityResponse) String() string { return proto.CompactTextString(m) }
-func (*GetCapacityResponse) ProtoMessage() {}
-func (*GetCapacityResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} }
-
-func (m *GetCapacityResponse) GetAvailableCapacity() uint64 {
-    if m != nil {
-        return m.AvailableCapacity
-    }
-    return 0
-}
-
-// //////
-// //////
-type ControllerProbeRequest struct {
-    // The API version assumed by the CO. This is a REQUIRED field.
-    Version *Version `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"`
-}
-
-func (m *ControllerProbeRequest) Reset() { *m = ControllerProbeRequest{} }
-func (m *ControllerProbeRequest) String() string { return proto.CompactTextString(m) }
-func (*ControllerProbeRequest) ProtoMessage() {}
-func (*ControllerProbeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} }
-
-func (m *ControllerProbeRequest) GetVersion() *Version {
-    if m != nil {
-        return m.Version
-    }
-    return nil
-}
-
-type ControllerProbeResponse struct {
-}
-
-func (m *ControllerProbeResponse) Reset() { *m = ControllerProbeResponse{} }
-func (m *ControllerProbeResponse) String() string { return proto.CompactTextString(m) }
-func (*ControllerProbeResponse) ProtoMessage() {}
-func (*ControllerProbeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} }
-
-// //////
-// //////
-type ControllerGetCapabilitiesRequest struct {
-    // The API version assumed by the CO. This is a REQUIRED field.
-    Version *Version `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"`
-}
-
-func (m *ControllerGetCapabilitiesRequest) Reset() { *m = ControllerGetCapabilitiesRequest{} }
-func (m *ControllerGetCapabilitiesRequest) String() string { return proto.CompactTextString(m) }
-func (*ControllerGetCapabilitiesRequest) ProtoMessage() {}
-func (*ControllerGetCapabilitiesRequest) Descriptor() ([]byte, []int) {
-    return fileDescriptor0, []int{24}
-}
-
-func (m *ControllerGetCapabilitiesRequest) GetVersion() *Version {
-    if m != nil {
-        return m.Version
-    }
-    return nil
-}
-
-type ControllerGetCapabilitiesResponse struct {
-    // All the capabilities that the controller service supports. This
-    // field is OPTIONAL.
-    Capabilities []*ControllerServiceCapability `protobuf:"bytes,2,rep,name=capabilities" json:"capabilities,omitempty"`
-}
-
-func (m *ControllerGetCapabilitiesResponse) Reset() { *m = ControllerGetCapabilitiesResponse{} }
-func (m *ControllerGetCapabilitiesResponse) String() string { return proto.CompactTextString(m) }
-func (*ControllerGetCapabilitiesResponse) ProtoMessage() {}
-func (*ControllerGetCapabilitiesResponse) Descriptor() ([]byte, []int) {
-    return fileDescriptor0, []int{25}
-}
-
-func (m *ControllerGetCapabilitiesResponse) GetCapabilities() []*ControllerServiceCapability {
-    if m != nil {
-        return m.Capabilities
-    }
-    return nil
-}
-
-// Specifies a capability of the controller service.
-type ControllerServiceCapability struct {
-    // Types that are valid to be assigned to Type:
-    //  *ControllerServiceCapability_Rpc
-    Type isControllerServiceCapability_Type `protobuf_oneof:"type"`
-}
-
-func (m *ControllerServiceCapability) Reset() { *m = ControllerServiceCapability{} }
-func (m *ControllerServiceCapability) String() string { return proto.CompactTextString(m) }
-func (*ControllerServiceCapability) ProtoMessage() {}
-func (*ControllerServiceCapability) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} }
-
-type isControllerServiceCapability_Type interface {
-    isControllerServiceCapability_Type()
-}
-
-type ControllerServiceCapability_Rpc struct {
-    Rpc *ControllerServiceCapability_RPC `protobuf:"bytes,1,opt,name=rpc,oneof"`
-}
-
-func (*ControllerServiceCapability_Rpc) isControllerServiceCapability_Type() {}
-
-func (m *ControllerServiceCapability) GetType() isControllerServiceCapability_Type {
-    if m != nil {
-        return m.Type
-    }
-    return nil
-}
-
-func (m *ControllerServiceCapability) GetRpc() *ControllerServiceCapability_RPC {
-    if x, ok := m.GetType().(*ControllerServiceCapability_Rpc); ok {
-        return x.Rpc
-    }
-    return nil
-}
-
-// XXX_OneofFuncs is for the internal use of the proto package.
-func (*ControllerServiceCapability) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
-    return _ControllerServiceCapability_OneofMarshaler, _ControllerServiceCapability_OneofUnmarshaler, _ControllerServiceCapability_OneofSizer, []interface{}{
-        (*ControllerServiceCapability_Rpc)(nil),
-    }
-}
-
-func _ControllerServiceCapability_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
-    m := msg.(*ControllerServiceCapability)
-    // type
-    switch x := m.Type.(type) {
-    case *ControllerServiceCapability_Rpc:
-        b.EncodeVarint(1<<3 | proto.WireBytes)
-        if err := b.EncodeMessage(x.Rpc); err != nil {
-            return err
-        }
-    case nil:
-    default:
-        return fmt.Errorf("ControllerServiceCapability.Type has unexpected type %T", x)
-    }
-    return nil
-}
-
-func _ControllerServiceCapability_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
-    m := msg.(*ControllerServiceCapability)
-    switch tag {
-    case 1: // type.rpc
-        if wire != proto.WireBytes {
-            return true, proto.ErrInternalBadWireType
-        }
-        msg := new(ControllerServiceCapability_RPC)
-        err := b.DecodeMessage(msg)
-        m.Type = &ControllerServiceCapability_Rpc{msg}
-        return true, err
-    default:
-        return false, nil
-    }
-}
-
-func _ControllerServiceCapability_OneofSizer(msg proto.Message) (n int) {
-    m := msg.(*ControllerServiceCapability)
-    // type
-    switch x := m.Type.(type) {
-    case *ControllerServiceCapability_Rpc:
-        s := proto.Size(x.Rpc)
-        n += proto.SizeVarint(1<<3 | proto.WireBytes)
-        n += proto.SizeVarint(uint64(s))
-        n += s
-    case nil:
-    default:
-        panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
-    }
-    return n
-}
-
-type ControllerServiceCapability_RPC struct {
-    Type ControllerServiceCapability_RPC_Type `protobuf:"varint,1,opt,name=type,enum=csi.ControllerServiceCapability_RPC_Type" json:"type,omitempty"`
-}
-
-func (m *ControllerServiceCapability_RPC) Reset() { *m = ControllerServiceCapability_RPC{} }
-func (m *ControllerServiceCapability_RPC) String() string { return proto.CompactTextString(m) }
-func (*ControllerServiceCapability_RPC) ProtoMessage() {}
-func (*ControllerServiceCapability_RPC) Descriptor() ([]byte, []int) {
-    return fileDescriptor0, []int{26, 0}
-}
-
-func (m *ControllerServiceCapability_RPC) GetType() ControllerServiceCapability_RPC_Type {
-    if m != nil {
-        return m.Type
-    }
-    return ControllerServiceCapability_RPC_UNKNOWN
-}
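
Editor's sketch (not part of the upstream diff): a CO is expected to gate calls like `ControllerPublishVolume` on the capabilities the controller advertises. A plausible check, using only getters defined in this file; the function name is hypothetical.

    // supportsPublish reports whether the controller plugin advertises the
    // PUBLISH_UNPUBLISH_VOLUME capability in its ControllerGetCapabilities reply.
    func supportsPublish(resp *csi.ControllerGetCapabilitiesResponse) bool {
        for _, c := range resp.GetCapabilities() {
            if rpc := c.GetRpc(); rpc != nil &&
                rpc.GetType() == csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME {
                return true
            }
        }
        return false
    }
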
-
-// //////
-// //////
-type NodePublishVolumeRequest struct {
-    // The API version assumed by the CO. This is a REQUIRED field.
-    Version *Version `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"`
-    // The ID of the volume to publish. This field is REQUIRED.
-    VolumeId string `protobuf:"bytes,2,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
-    // The CO SHALL set this field to the value returned by
-    // `ControllerPublishVolume` if the corresponding Controller Plugin
-    // has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be
-    // left unset if the corresponding Controller Plugin does not have
-    // this capability. This is an OPTIONAL field.
-    PublishVolumeInfo map[string]string `protobuf:"bytes,3,rep,name=publish_volume_info,json=publishVolumeInfo" json:"publish_volume_info,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
-    // The path to which the volume will be published. It MUST be an
-    // absolute path in the root filesystem of the process serving this
-    // request. The CO SHALL ensure uniqueness of target_path per volume.
-    // This is a REQUIRED field.
-    TargetPath string `protobuf:"bytes,4,opt,name=target_path,json=targetPath" json:"target_path,omitempty"`
-    // The capability of the volume the CO expects the volume to have.
-    // This is a REQUIRED field.
-    VolumeCapability *VolumeCapability `protobuf:"bytes,5,opt,name=volume_capability,json=volumeCapability" json:"volume_capability,omitempty"`
-    // Whether to publish the volume in readonly mode. This field is
-    // REQUIRED.
-    Readonly bool `protobuf:"varint,6,opt,name=readonly" json:"readonly,omitempty"`
-    // End user credentials used to authenticate/authorize node
-    // publish request.
-    // This field contains credential data, for example username and
-    // password. Each key must consist of alphanumeric characters, '-',
-    // '_' or '.'. Each value MUST contain a valid string. An SP MAY
-    // choose to accept binary (non-string) data by using a binary-to-text
-    // encoding scheme, like base64. An SP SHALL advertise the
-    // requirements for credentials in documentation. COs SHALL permit
-    // users to pass through the required credentials. This information is
-    // sensitive and MUST be treated as such (not logged, etc.) by the CO.
-    // This field is OPTIONAL.
-    UserCredentials map[string]string `protobuf:"bytes,7,rep,name=user_credentials,json=userCredentials" json:"user_credentials,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
-    // Attributes of the volume to publish. This field is OPTIONAL and
-    // MUST match the attributes of the VolumeInfo identified by
-    // `volume_id`.
-    VolumeAttributes map[string]string `protobuf:"bytes,8,rep,name=volume_attributes,json=volumeAttributes" json:"volume_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
-}
-
-func (m *NodePublishVolumeRequest) Reset() { *m = NodePublishVolumeRequest{} }
-func (m *NodePublishVolumeRequest) String() string { return proto.CompactTextString(m) }
-func (*NodePublishVolumeRequest) ProtoMessage() {}
-func (*NodePublishVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} }
-
-func (m *NodePublishVolumeRequest) GetVersion() *Version {
-    if m != nil {
-        return m.Version
-    }
-    return nil
-}
-
-func (m *NodePublishVolumeRequest) GetVolumeId() string {
-    if m != nil {
-        return m.VolumeId
-    }
-    return ""
-}
-
-func (m *NodePublishVolumeRequest) GetPublishVolumeInfo() map[string]string {
-    if m != nil {
-        return m.PublishVolumeInfo
-    }
-    return nil
-}
-
-func (m *NodePublishVolumeRequest) GetTargetPath() string {
-    if m != nil {
-        return m.TargetPath
-    }
-    return ""
-}
-
-func (m *NodePublishVolumeRequest) GetVolumeCapability() *VolumeCapability {
-    if m != nil {
-        return m.VolumeCapability
-    }
-    return nil
-}
-
-func (m *NodePublishVolumeRequest) GetReadonly() bool {
-    if m != nil {
-        return m.Readonly
-    }
-    return false
-}
-
-func (m *NodePublishVolumeRequest) GetUserCredentials() map[string]string {
-    if m != nil {
-        return m.UserCredentials
-    }
-    return nil
-}
-
-func (m *NodePublishVolumeRequest) GetVolumeAttributes() map[string]string {
-    if m != nil {
-        return m.VolumeAttributes
-    }
-    return nil
-}
-
-type NodePublishVolumeResponse struct {
-}
-
-func (m *NodePublishVolumeResponse) Reset() { *m = NodePublishVolumeResponse{} }
-func (m *NodePublishVolumeResponse) String() string { return proto.CompactTextString(m) }
-func (*NodePublishVolumeResponse) ProtoMessage() {}
-func (*NodePublishVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} }
-
-// //////
-// //////
-type NodeUnpublishVolumeRequest struct {
-    // The API version assumed by the CO. This is a REQUIRED field.
-    Version *Version `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"`
-    // The ID of the volume. This field is REQUIRED.
-    VolumeId string `protobuf:"bytes,2,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
-    // The path at which the volume was published. It MUST be an absolute
-    // path in the root filesystem of the process serving this request.
-    // This is a REQUIRED field.
-    TargetPath string `protobuf:"bytes,3,opt,name=target_path,json=targetPath" json:"target_path,omitempty"`
-    // End user credentials used to authenticate/authorize node
-    // unpublish request.
-    // This field contains credential data, for example username and
-    // password. Each key must consist of alphanumeric characters, '-',
-    // '_' or '.'. Each value MUST contain a valid string. An SP MAY
-    // choose to accept binary (non-string) data by using a binary-to-text
-    // encoding scheme, like base64. An SP SHALL advertise the
-    // requirements for credentials in documentation. COs SHALL permit
-    // users to pass through the required credentials. This information is
-    // sensitive and MUST be treated as such (not logged, etc.) by the CO.
-    // This field is OPTIONAL.
-    UserCredentials map[string]string `protobuf:"bytes,4,rep,name=user_credentials,json=userCredentials" json:"user_credentials,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
-}
-
-func (m *NodeUnpublishVolumeRequest) Reset() { *m = NodeUnpublishVolumeRequest{} }
-func (m *NodeUnpublishVolumeRequest) String() string { return proto.CompactTextString(m) }
-func (*NodeUnpublishVolumeRequest) ProtoMessage() {}
-func (*NodeUnpublishVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} }
-
-func (m *NodeUnpublishVolumeRequest) GetVersion() *Version {
-    if m != nil {
-        return m.Version
-    }
-    return nil
-}
-
-func (m *NodeUnpublishVolumeRequest) GetVolumeId() string {
-    if m != nil {
-        return m.VolumeId
-    }
-    return ""
-}
-
-func (m *NodeUnpublishVolumeRequest) GetTargetPath() string {
-    if m != nil {
-        return m.TargetPath
-    }
-    return ""
-}
-
-func (m *NodeUnpublishVolumeRequest) GetUserCredentials() map[string]string {
-    if m != nil {
-        return m.UserCredentials
-    }
-    return nil
-}
-
-type NodeUnpublishVolumeResponse struct {
-}
-
-func (m *NodeUnpublishVolumeResponse) Reset() { *m = NodeUnpublishVolumeResponse{} }
-func (m *NodeUnpublishVolumeResponse) String() string { return proto.CompactTextString(m) }
-func (*NodeUnpublishVolumeResponse) ProtoMessage() {}
-func (*NodeUnpublishVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} }
-
-// //////
-// //////
-type GetNodeIDRequest struct {
-    // The API version assumed by the CO. This is a REQUIRED field.
-    Version *Version `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"`
-}
-
-func (m *GetNodeIDRequest) Reset() { *m = GetNodeIDRequest{} }
-func (m *GetNodeIDRequest) String() string { return proto.CompactTextString(m) }
-func (*GetNodeIDRequest) ProtoMessage() {}
-func (*GetNodeIDRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} }
-
-func (m *GetNodeIDRequest) GetVersion() *Version {
-    if m != nil {
-        return m.Version
-    }
-    return nil
-}
-
-type GetNodeIDResponse struct {
-    // The ID of the node as understood by the SP which SHALL be used by
-    // CO in subsequent `ControllerPublishVolume`.
-    // This is a REQUIRED field.
-    NodeId string `protobuf:"bytes,1,opt,name=node_id,json=nodeId" json:"node_id,omitempty"`
-}
-
-func (m *GetNodeIDResponse) Reset() { *m = GetNodeIDResponse{} }
-func (m *GetNodeIDResponse) String() string { return proto.CompactTextString(m) }
-func (*GetNodeIDResponse) ProtoMessage() {}
-func (*GetNodeIDResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} }
-
-func (m *GetNodeIDResponse) GetNodeId() string {
-    if m != nil {
-        return m.NodeId
-    }
-    return ""
-}
-
-// //////
-// //////
-type NodeProbeRequest struct {
-    // The API version assumed by the CO. This is a REQUIRED field.
-    Version *Version `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"`
-}
-
-func (m *NodeProbeRequest) Reset() { *m = NodeProbeRequest{} }
-func (m *NodeProbeRequest) String() string { return proto.CompactTextString(m) }
-func (*NodeProbeRequest) ProtoMessage() {}
-func (*NodeProbeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} }
-
-func (m *NodeProbeRequest) GetVersion() *Version {
-    if m != nil {
-        return m.Version
-    }
-    return nil
-}
-
-type NodeProbeResponse struct {
-}
-
-func (m *NodeProbeResponse) Reset() { *m = NodeProbeResponse{} }
-func (m *NodeProbeResponse) String() string { return proto.CompactTextString(m) }
-func (*NodeProbeResponse) ProtoMessage() {}
-func (*NodeProbeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} }
-
-// //////
-// //////
-type NodeGetCapabilitiesRequest struct {
-    // The API version assumed by the CO. This is a REQUIRED field.
-    Version *Version `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"`
-}
-
-func (m *NodeGetCapabilitiesRequest) Reset() { *m = NodeGetCapabilitiesRequest{} }
-func (m *NodeGetCapabilitiesRequest) String() string { return proto.CompactTextString(m) }
-func (*NodeGetCapabilitiesRequest) ProtoMessage() {}
-func (*NodeGetCapabilitiesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35} }
-
-func (m *NodeGetCapabilitiesRequest) GetVersion() *Version {
-    if m != nil {
-        return m.Version
-    }
-    return nil
-}
-
-type NodeGetCapabilitiesResponse struct {
-    // All the capabilities that the node service supports. This field
-    // is OPTIONAL.
-    Capabilities []*NodeServiceCapability `protobuf:"bytes,1,rep,name=capabilities" json:"capabilities,omitempty"`
-}
-
-func (m *NodeGetCapabilitiesResponse) Reset() { *m = NodeGetCapabilitiesResponse{} }
-func (m *NodeGetCapabilitiesResponse) String() string { return proto.CompactTextString(m) }
-func (*NodeGetCapabilitiesResponse) ProtoMessage() {}
-func (*NodeGetCapabilitiesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{36} }
-
-func (m *NodeGetCapabilitiesResponse) GetCapabilities() []*NodeServiceCapability {
-    if m != nil {
-        return m.Capabilities
-    }
-    return nil
-}
-
-// Specifies a capability of the node service.
-type NodeServiceCapability struct {
-    // Types that are valid to be assigned to Type:
-    //  *NodeServiceCapability_Rpc
-    Type isNodeServiceCapability_Type `protobuf_oneof:"type"`
-}
-
-func (m *NodeServiceCapability) Reset() { *m = NodeServiceCapability{} }
-func (m *NodeServiceCapability) String() string { return proto.CompactTextString(m) }
-func (*NodeServiceCapability) ProtoMessage() {}
-func (*NodeServiceCapability) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37} }
-
-type isNodeServiceCapability_Type interface {
-    isNodeServiceCapability_Type()
-}
-
-type NodeServiceCapability_Rpc struct {
-    Rpc *NodeServiceCapability_RPC `protobuf:"bytes,1,opt,name=rpc,oneof"`
-}
-
-func (*NodeServiceCapability_Rpc) isNodeServiceCapability_Type() {}
-
-func (m *NodeServiceCapability) GetType() isNodeServiceCapability_Type {
-    if m != nil {
-        return m.Type
-    }
-    return nil
-}
-
-func (m *NodeServiceCapability) GetRpc() *NodeServiceCapability_RPC {
-    if x, ok := m.GetType().(*NodeServiceCapability_Rpc); ok {
-        return x.Rpc
-    }
-    return nil
-}
-
-// XXX_OneofFuncs is for the internal use of the proto package.
-func (*NodeServiceCapability) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
-    return _NodeServiceCapability_OneofMarshaler, _NodeServiceCapability_OneofUnmarshaler, _NodeServiceCapability_OneofSizer, []interface{}{
-        (*NodeServiceCapability_Rpc)(nil),
-    }
-}
-
-func _NodeServiceCapability_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
-    m := msg.(*NodeServiceCapability)
-    // type
-    switch x := m.Type.(type) {
-    case *NodeServiceCapability_Rpc:
-        b.EncodeVarint(1<<3 | proto.WireBytes)
-        if err := b.EncodeMessage(x.Rpc); err != nil {
-            return err
-        }
-    case nil:
-    default:
-        return fmt.Errorf("NodeServiceCapability.Type has unexpected type %T", x)
-    }
-    return nil
-}
-
-func _NodeServiceCapability_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
-    m := msg.(*NodeServiceCapability)
-    switch tag {
-    case 1: // type.rpc
-        if wire != proto.WireBytes {
-            return true, proto.ErrInternalBadWireType
-        }
-        msg := new(NodeServiceCapability_RPC)
-        err := b.DecodeMessage(msg)
-        m.Type = &NodeServiceCapability_Rpc{msg}
-        return true, err
-    default:
-        return false, nil
-    }
-}
-
-func _NodeServiceCapability_OneofSizer(msg proto.Message) (n int) {
-    m := msg.(*NodeServiceCapability)
-    // type
-    switch x := m.Type.(type) {
-    case *NodeServiceCapability_Rpc:
-        s := proto.Size(x.Rpc)
-        n += proto.SizeVarint(1<<3 | proto.WireBytes)
-        n += proto.SizeVarint(uint64(s))
-        n += s
-    case nil:
-    default:
-        panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
-    }
-    return n
-}
-
-type NodeServiceCapability_RPC struct {
-    Type NodeServiceCapability_RPC_Type `protobuf:"varint,1,opt,name=type,enum=csi.NodeServiceCapability_RPC_Type" json:"type,omitempty"`
-}
-
-func (m *NodeServiceCapability_RPC) Reset() { *m = NodeServiceCapability_RPC{} }
-func (m *NodeServiceCapability_RPC) String() string { return proto.CompactTextString(m) }
-func (*NodeServiceCapability_RPC) ProtoMessage() {}
-func (*NodeServiceCapability_RPC) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37, 0} }
-
-func (m *NodeServiceCapability_RPC) GetType() NodeServiceCapability_RPC_Type {
-    if m != nil {
-        return m.Type
-    }
-    return NodeServiceCapability_RPC_UNKNOWN
-}
"csi.GetSupportedVersionsRequest") - proto.RegisterType((*GetSupportedVersionsResponse)(nil), "csi.GetSupportedVersionsResponse") - proto.RegisterType((*Version)(nil), "csi.Version") - proto.RegisterType((*GetPluginInfoRequest)(nil), "csi.GetPluginInfoRequest") - proto.RegisterType((*GetPluginInfoResponse)(nil), "csi.GetPluginInfoResponse") - proto.RegisterType((*CreateVolumeRequest)(nil), "csi.CreateVolumeRequest") - proto.RegisterType((*CreateVolumeResponse)(nil), "csi.CreateVolumeResponse") - proto.RegisterType((*VolumeCapability)(nil), "csi.VolumeCapability") - proto.RegisterType((*VolumeCapability_BlockVolume)(nil), "csi.VolumeCapability.BlockVolume") - proto.RegisterType((*VolumeCapability_MountVolume)(nil), "csi.VolumeCapability.MountVolume") - proto.RegisterType((*VolumeCapability_AccessMode)(nil), "csi.VolumeCapability.AccessMode") - proto.RegisterType((*CapacityRange)(nil), "csi.CapacityRange") - proto.RegisterType((*VolumeInfo)(nil), "csi.VolumeInfo") - proto.RegisterType((*DeleteVolumeRequest)(nil), "csi.DeleteVolumeRequest") - proto.RegisterType((*DeleteVolumeResponse)(nil), "csi.DeleteVolumeResponse") - proto.RegisterType((*ControllerPublishVolumeRequest)(nil), "csi.ControllerPublishVolumeRequest") - proto.RegisterType((*ControllerPublishVolumeResponse)(nil), "csi.ControllerPublishVolumeResponse") - proto.RegisterType((*ControllerUnpublishVolumeRequest)(nil), "csi.ControllerUnpublishVolumeRequest") - proto.RegisterType((*ControllerUnpublishVolumeResponse)(nil), "csi.ControllerUnpublishVolumeResponse") - proto.RegisterType((*ValidateVolumeCapabilitiesRequest)(nil), "csi.ValidateVolumeCapabilitiesRequest") - proto.RegisterType((*ValidateVolumeCapabilitiesResponse)(nil), "csi.ValidateVolumeCapabilitiesResponse") - proto.RegisterType((*ListVolumesRequest)(nil), "csi.ListVolumesRequest") - proto.RegisterType((*ListVolumesResponse)(nil), "csi.ListVolumesResponse") - proto.RegisterType((*ListVolumesResponse_Entry)(nil), "csi.ListVolumesResponse.Entry") - proto.RegisterType((*GetCapacityRequest)(nil), "csi.GetCapacityRequest") - proto.RegisterType((*GetCapacityResponse)(nil), "csi.GetCapacityResponse") - proto.RegisterType((*ControllerProbeRequest)(nil), "csi.ControllerProbeRequest") - proto.RegisterType((*ControllerProbeResponse)(nil), "csi.ControllerProbeResponse") - proto.RegisterType((*ControllerGetCapabilitiesRequest)(nil), "csi.ControllerGetCapabilitiesRequest") - proto.RegisterType((*ControllerGetCapabilitiesResponse)(nil), "csi.ControllerGetCapabilitiesResponse") - proto.RegisterType((*ControllerServiceCapability)(nil), "csi.ControllerServiceCapability") - proto.RegisterType((*ControllerServiceCapability_RPC)(nil), "csi.ControllerServiceCapability.RPC") - proto.RegisterType((*NodePublishVolumeRequest)(nil), "csi.NodePublishVolumeRequest") - proto.RegisterType((*NodePublishVolumeResponse)(nil), "csi.NodePublishVolumeResponse") - proto.RegisterType((*NodeUnpublishVolumeRequest)(nil), "csi.NodeUnpublishVolumeRequest") - proto.RegisterType((*NodeUnpublishVolumeResponse)(nil), "csi.NodeUnpublishVolumeResponse") - proto.RegisterType((*GetNodeIDRequest)(nil), "csi.GetNodeIDRequest") - proto.RegisterType((*GetNodeIDResponse)(nil), "csi.GetNodeIDResponse") - proto.RegisterType((*NodeProbeRequest)(nil), "csi.NodeProbeRequest") - proto.RegisterType((*NodeProbeResponse)(nil), "csi.NodeProbeResponse") - proto.RegisterType((*NodeGetCapabilitiesRequest)(nil), "csi.NodeGetCapabilitiesRequest") - proto.RegisterType((*NodeGetCapabilitiesResponse)(nil), "csi.NodeGetCapabilitiesResponse") - 
proto.RegisterType((*NodeServiceCapability)(nil), "csi.NodeServiceCapability") - proto.RegisterType((*NodeServiceCapability_RPC)(nil), "csi.NodeServiceCapability.RPC") - proto.RegisterEnum("csi.VolumeCapability_AccessMode_Mode", VolumeCapability_AccessMode_Mode_name, VolumeCapability_AccessMode_Mode_value) - proto.RegisterEnum("csi.ControllerServiceCapability_RPC_Type", ControllerServiceCapability_RPC_Type_name, ControllerServiceCapability_RPC_Type_value) - proto.RegisterEnum("csi.NodeServiceCapability_RPC_Type", NodeServiceCapability_RPC_Type_name, NodeServiceCapability_RPC_Type_value) -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// Client API for Identity service - -type IdentityClient interface { - GetSupportedVersions(ctx context.Context, in *GetSupportedVersionsRequest, opts ...grpc.CallOption) (*GetSupportedVersionsResponse, error) - GetPluginInfo(ctx context.Context, in *GetPluginInfoRequest, opts ...grpc.CallOption) (*GetPluginInfoResponse, error) -} - -type identityClient struct { - cc *grpc.ClientConn -} - -func NewIdentityClient(cc *grpc.ClientConn) IdentityClient { - return &identityClient{cc} -} - -func (c *identityClient) GetSupportedVersions(ctx context.Context, in *GetSupportedVersionsRequest, opts ...grpc.CallOption) (*GetSupportedVersionsResponse, error) { - out := new(GetSupportedVersionsResponse) - err := grpc.Invoke(ctx, "/csi.Identity/GetSupportedVersions", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *identityClient) GetPluginInfo(ctx context.Context, in *GetPluginInfoRequest, opts ...grpc.CallOption) (*GetPluginInfoResponse, error) { - out := new(GetPluginInfoResponse) - err := grpc.Invoke(ctx, "/csi.Identity/GetPluginInfo", in, out, c.cc, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -// Server API for Identity service - -type IdentityServer interface { - GetSupportedVersions(context.Context, *GetSupportedVersionsRequest) (*GetSupportedVersionsResponse, error) - GetPluginInfo(context.Context, *GetPluginInfoRequest) (*GetPluginInfoResponse, error) -} - -func RegisterIdentityServer(s *grpc.Server, srv IdentityServer) { - s.RegisterService(&_Identity_serviceDesc, srv) -} - -func _Identity_GetSupportedVersions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetSupportedVersionsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IdentityServer).GetSupportedVersions(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.Identity/GetSupportedVersions", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IdentityServer).GetSupportedVersions(ctx, req.(*GetSupportedVersionsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Identity_GetPluginInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetPluginInfoRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IdentityServer).GetPluginInfo(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.Identity/GetPluginInfo", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IdentityServer).GetPluginInfo(ctx, req.(*GetPluginInfoRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Identity_serviceDesc = grpc.ServiceDesc{ - ServiceName: "csi.Identity", - HandlerType: (*IdentityServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "GetSupportedVersions", - Handler: _Identity_GetSupportedVersions_Handler, - }, - { - MethodName: "GetPluginInfo", - Handler: _Identity_GetPluginInfo_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "csi.proto", -} - -// Client API for Controller service - -type ControllerClient interface { - CreateVolume(ctx context.Context, in *CreateVolumeRequest, opts ...grpc.CallOption) (*CreateVolumeResponse, error) - DeleteVolume(ctx context.Context, in *DeleteVolumeRequest, opts ...grpc.CallOption) (*DeleteVolumeResponse, error) - ControllerPublishVolume(ctx context.Context, in *ControllerPublishVolumeRequest, opts ...grpc.CallOption) (*ControllerPublishVolumeResponse, error) - ControllerUnpublishVolume(ctx context.Context, in *ControllerUnpublishVolumeRequest, opts ...grpc.CallOption) (*ControllerUnpublishVolumeResponse, error) - ValidateVolumeCapabilities(ctx context.Context, in *ValidateVolumeCapabilitiesRequest, opts ...grpc.CallOption) (*ValidateVolumeCapabilitiesResponse, error) - ListVolumes(ctx context.Context, in *ListVolumesRequest, opts ...grpc.CallOption) (*ListVolumesResponse, error) - GetCapacity(ctx context.Context, in *GetCapacityRequest, opts ...grpc.CallOption) (*GetCapacityResponse, error) - ControllerProbe(ctx context.Context, in *ControllerProbeRequest, opts ...grpc.CallOption) (*ControllerProbeResponse, error) - ControllerGetCapabilities(ctx context.Context, in *ControllerGetCapabilitiesRequest, opts ...grpc.CallOption) (*ControllerGetCapabilitiesResponse, error) -} - -type controllerClient struct { - cc *grpc.ClientConn -} - -func 
NewControllerClient(cc *grpc.ClientConn) ControllerClient { - return &controllerClient{cc} -} - -func (c *controllerClient) CreateVolume(ctx context.Context, in *CreateVolumeRequest, opts ...grpc.CallOption) (*CreateVolumeResponse, error) { - out := new(CreateVolumeResponse) - err := grpc.Invoke(ctx, "/csi.Controller/CreateVolume", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *controllerClient) DeleteVolume(ctx context.Context, in *DeleteVolumeRequest, opts ...grpc.CallOption) (*DeleteVolumeResponse, error) { - out := new(DeleteVolumeResponse) - err := grpc.Invoke(ctx, "/csi.Controller/DeleteVolume", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *controllerClient) ControllerPublishVolume(ctx context.Context, in *ControllerPublishVolumeRequest, opts ...grpc.CallOption) (*ControllerPublishVolumeResponse, error) { - out := new(ControllerPublishVolumeResponse) - err := grpc.Invoke(ctx, "/csi.Controller/ControllerPublishVolume", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *controllerClient) ControllerUnpublishVolume(ctx context.Context, in *ControllerUnpublishVolumeRequest, opts ...grpc.CallOption) (*ControllerUnpublishVolumeResponse, error) { - out := new(ControllerUnpublishVolumeResponse) - err := grpc.Invoke(ctx, "/csi.Controller/ControllerUnpublishVolume", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *controllerClient) ValidateVolumeCapabilities(ctx context.Context, in *ValidateVolumeCapabilitiesRequest, opts ...grpc.CallOption) (*ValidateVolumeCapabilitiesResponse, error) { - out := new(ValidateVolumeCapabilitiesResponse) - err := grpc.Invoke(ctx, "/csi.Controller/ValidateVolumeCapabilities", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *controllerClient) ListVolumes(ctx context.Context, in *ListVolumesRequest, opts ...grpc.CallOption) (*ListVolumesResponse, error) { - out := new(ListVolumesResponse) - err := grpc.Invoke(ctx, "/csi.Controller/ListVolumes", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *controllerClient) GetCapacity(ctx context.Context, in *GetCapacityRequest, opts ...grpc.CallOption) (*GetCapacityResponse, error) { - out := new(GetCapacityResponse) - err := grpc.Invoke(ctx, "/csi.Controller/GetCapacity", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *controllerClient) ControllerProbe(ctx context.Context, in *ControllerProbeRequest, opts ...grpc.CallOption) (*ControllerProbeResponse, error) { - out := new(ControllerProbeResponse) - err := grpc.Invoke(ctx, "/csi.Controller/ControllerProbe", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *controllerClient) ControllerGetCapabilities(ctx context.Context, in *ControllerGetCapabilitiesRequest, opts ...grpc.CallOption) (*ControllerGetCapabilitiesResponse, error) { - out := new(ControllerGetCapabilitiesResponse) - err := grpc.Invoke(ctx, "/csi.Controller/ControllerGetCapabilities", in, out, c.cc, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -// Server API for Controller service - -type ControllerServer interface { - CreateVolume(context.Context, *CreateVolumeRequest) (*CreateVolumeResponse, error) - DeleteVolume(context.Context, *DeleteVolumeRequest) (*DeleteVolumeResponse, error) - ControllerPublishVolume(context.Context, *ControllerPublishVolumeRequest) (*ControllerPublishVolumeResponse, error) - ControllerUnpublishVolume(context.Context, *ControllerUnpublishVolumeRequest) (*ControllerUnpublishVolumeResponse, error) - ValidateVolumeCapabilities(context.Context, *ValidateVolumeCapabilitiesRequest) (*ValidateVolumeCapabilitiesResponse, error) - ListVolumes(context.Context, *ListVolumesRequest) (*ListVolumesResponse, error) - GetCapacity(context.Context, *GetCapacityRequest) (*GetCapacityResponse, error) - ControllerProbe(context.Context, *ControllerProbeRequest) (*ControllerProbeResponse, error) - ControllerGetCapabilities(context.Context, *ControllerGetCapabilitiesRequest) (*ControllerGetCapabilitiesResponse, error) -} - -func RegisterControllerServer(s *grpc.Server, srv ControllerServer) { - s.RegisterService(&_Controller_serviceDesc, srv) -} - -func _Controller_CreateVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CreateVolumeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControllerServer).CreateVolume(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.Controller/CreateVolume", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControllerServer).CreateVolume(ctx, req.(*CreateVolumeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Controller_DeleteVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteVolumeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControllerServer).DeleteVolume(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.Controller/DeleteVolume", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControllerServer).DeleteVolume(ctx, req.(*DeleteVolumeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Controller_ControllerPublishVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ControllerPublishVolumeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControllerServer).ControllerPublishVolume(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.Controller/ControllerPublishVolume", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControllerServer).ControllerPublishVolume(ctx, req.(*ControllerPublishVolumeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Controller_ControllerUnpublishVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ControllerUnpublishVolumeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return 
srv.(ControllerServer).ControllerUnpublishVolume(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.Controller/ControllerUnpublishVolume", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControllerServer).ControllerUnpublishVolume(ctx, req.(*ControllerUnpublishVolumeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Controller_ValidateVolumeCapabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ValidateVolumeCapabilitiesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControllerServer).ValidateVolumeCapabilities(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.Controller/ValidateVolumeCapabilities", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControllerServer).ValidateVolumeCapabilities(ctx, req.(*ValidateVolumeCapabilitiesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Controller_ListVolumes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListVolumesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControllerServer).ListVolumes(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.Controller/ListVolumes", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControllerServer).ListVolumes(ctx, req.(*ListVolumesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Controller_GetCapacity_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetCapacityRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControllerServer).GetCapacity(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.Controller/GetCapacity", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControllerServer).GetCapacity(ctx, req.(*GetCapacityRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Controller_ControllerProbe_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ControllerProbeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControllerServer).ControllerProbe(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.Controller/ControllerProbe", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControllerServer).ControllerProbe(ctx, req.(*ControllerProbeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Controller_ControllerGetCapabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ControllerGetCapabilitiesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControllerServer).ControllerGetCapabilities(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: 
"/csi.Controller/ControllerGetCapabilities", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControllerServer).ControllerGetCapabilities(ctx, req.(*ControllerGetCapabilitiesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Controller_serviceDesc = grpc.ServiceDesc{ - ServiceName: "csi.Controller", - HandlerType: (*ControllerServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "CreateVolume", - Handler: _Controller_CreateVolume_Handler, - }, - { - MethodName: "DeleteVolume", - Handler: _Controller_DeleteVolume_Handler, - }, - { - MethodName: "ControllerPublishVolume", - Handler: _Controller_ControllerPublishVolume_Handler, - }, - { - MethodName: "ControllerUnpublishVolume", - Handler: _Controller_ControllerUnpublishVolume_Handler, - }, - { - MethodName: "ValidateVolumeCapabilities", - Handler: _Controller_ValidateVolumeCapabilities_Handler, - }, - { - MethodName: "ListVolumes", - Handler: _Controller_ListVolumes_Handler, - }, - { - MethodName: "GetCapacity", - Handler: _Controller_GetCapacity_Handler, - }, - { - MethodName: "ControllerProbe", - Handler: _Controller_ControllerProbe_Handler, - }, - { - MethodName: "ControllerGetCapabilities", - Handler: _Controller_ControllerGetCapabilities_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "csi.proto", -} - -// Client API for Node service - -type NodeClient interface { - NodePublishVolume(ctx context.Context, in *NodePublishVolumeRequest, opts ...grpc.CallOption) (*NodePublishVolumeResponse, error) - NodeUnpublishVolume(ctx context.Context, in *NodeUnpublishVolumeRequest, opts ...grpc.CallOption) (*NodeUnpublishVolumeResponse, error) - GetNodeID(ctx context.Context, in *GetNodeIDRequest, opts ...grpc.CallOption) (*GetNodeIDResponse, error) - NodeProbe(ctx context.Context, in *NodeProbeRequest, opts ...grpc.CallOption) (*NodeProbeResponse, error) - NodeGetCapabilities(ctx context.Context, in *NodeGetCapabilitiesRequest, opts ...grpc.CallOption) (*NodeGetCapabilitiesResponse, error) -} - -type nodeClient struct { - cc *grpc.ClientConn -} - -func NewNodeClient(cc *grpc.ClientConn) NodeClient { - return &nodeClient{cc} -} - -func (c *nodeClient) NodePublishVolume(ctx context.Context, in *NodePublishVolumeRequest, opts ...grpc.CallOption) (*NodePublishVolumeResponse, error) { - out := new(NodePublishVolumeResponse) - err := grpc.Invoke(ctx, "/csi.Node/NodePublishVolume", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *nodeClient) NodeUnpublishVolume(ctx context.Context, in *NodeUnpublishVolumeRequest, opts ...grpc.CallOption) (*NodeUnpublishVolumeResponse, error) { - out := new(NodeUnpublishVolumeResponse) - err := grpc.Invoke(ctx, "/csi.Node/NodeUnpublishVolume", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *nodeClient) GetNodeID(ctx context.Context, in *GetNodeIDRequest, opts ...grpc.CallOption) (*GetNodeIDResponse, error) { - out := new(GetNodeIDResponse) - err := grpc.Invoke(ctx, "/csi.Node/GetNodeID", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *nodeClient) NodeProbe(ctx context.Context, in *NodeProbeRequest, opts ...grpc.CallOption) (*NodeProbeResponse, error) { - out := new(NodeProbeResponse) - err := grpc.Invoke(ctx, "/csi.Node/NodeProbe", in, out, c.cc, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *nodeClient) NodeGetCapabilities(ctx context.Context, in *NodeGetCapabilitiesRequest, opts ...grpc.CallOption) (*NodeGetCapabilitiesResponse, error) { - out := new(NodeGetCapabilitiesResponse) - err := grpc.Invoke(ctx, "/csi.Node/NodeGetCapabilities", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// Server API for Node service - -type NodeServer interface { - NodePublishVolume(context.Context, *NodePublishVolumeRequest) (*NodePublishVolumeResponse, error) - NodeUnpublishVolume(context.Context, *NodeUnpublishVolumeRequest) (*NodeUnpublishVolumeResponse, error) - GetNodeID(context.Context, *GetNodeIDRequest) (*GetNodeIDResponse, error) - NodeProbe(context.Context, *NodeProbeRequest) (*NodeProbeResponse, error) - NodeGetCapabilities(context.Context, *NodeGetCapabilitiesRequest) (*NodeGetCapabilitiesResponse, error) -} - -func RegisterNodeServer(s *grpc.Server, srv NodeServer) { - s.RegisterService(&_Node_serviceDesc, srv) -} - -func _Node_NodePublishVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(NodePublishVolumeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(NodeServer).NodePublishVolume(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.Node/NodePublishVolume", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(NodeServer).NodePublishVolume(ctx, req.(*NodePublishVolumeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Node_NodeUnpublishVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(NodeUnpublishVolumeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(NodeServer).NodeUnpublishVolume(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.Node/NodeUnpublishVolume", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(NodeServer).NodeUnpublishVolume(ctx, req.(*NodeUnpublishVolumeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Node_GetNodeID_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetNodeIDRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(NodeServer).GetNodeID(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.Node/GetNodeID", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(NodeServer).GetNodeID(ctx, req.(*GetNodeIDRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Node_NodeProbe_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(NodeProbeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(NodeServer).NodeProbe(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.Node/NodeProbe", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(NodeServer).NodeProbe(ctx, req.(*NodeProbeRequest)) - } - 
return interceptor(ctx, in, info, handler) -} - -func _Node_NodeGetCapabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(NodeGetCapabilitiesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(NodeServer).NodeGetCapabilities(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/csi.Node/NodeGetCapabilities", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(NodeServer).NodeGetCapabilities(ctx, req.(*NodeGetCapabilitiesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Node_serviceDesc = grpc.ServiceDesc{ - ServiceName: "csi.Node", - HandlerType: (*NodeServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "NodePublishVolume", - Handler: _Node_NodePublishVolume_Handler, - }, - { - MethodName: "NodeUnpublishVolume", - Handler: _Node_NodeUnpublishVolume_Handler, - }, - { - MethodName: "GetNodeID", - Handler: _Node_GetNodeID_Handler, - }, - { - MethodName: "NodeProbe", - Handler: _Node_NodeProbe_Handler, - }, - { - MethodName: "NodeGetCapabilities", - Handler: _Node_NodeGetCapabilities_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "csi.proto", -} - -func init() { proto.RegisterFile("csi.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 1993 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x59, 0x4b, 0x73, 0xe3, 0x58, - 0x15, 0xb6, 0x6c, 0xe7, 0xe1, 0xe3, 0x38, 0xed, 0x5c, 0xe7, 0xa1, 0x28, 0xfd, 0x70, 0xab, 0xa7, - 0x7b, 0x42, 0x15, 0xe3, 0xa2, 0x3c, 0x54, 0xd1, 0xe9, 0x9e, 0x19, 0x48, 0x6c, 0x4f, 0x62, 0x26, - 0x71, 0xa7, 0x14, 0xa7, 0x9b, 0x01, 0xa6, 0x84, 0x62, 0xdf, 0xa4, 0x45, 0xcb, 0x92, 0x47, 0x92, - 0x5d, 0xed, 0x3d, 0x4b, 0x16, 0xec, 0xd8, 0xc1, 0x72, 0xa0, 0x58, 0x02, 0xbf, 0x80, 0xbf, 0x00, - 0xac, 0xd9, 0xf2, 0x0f, 0xa8, 0x62, 0x43, 0xdd, 0x87, 0x64, 0x49, 0x96, 0x1c, 0x3b, 0x93, 0x1e, - 0x66, 0x27, 0x9d, 0xc7, 0x77, 0xcf, 0xe3, 0xde, 0x73, 0xce, 0x95, 0x20, 0xd7, 0x71, 0xf4, 0x4a, - 0xdf, 0xb6, 0x5c, 0x0b, 0x65, 0x3a, 0x8e, 0x2e, 0xdf, 0x83, 0x9d, 0x43, 0xec, 0x9e, 0x0d, 0xfa, - 0x7d, 0xcb, 0x76, 0x71, 0xf7, 0x25, 0xb6, 0x1d, 0xdd, 0x32, 0x1d, 0x05, 0x7f, 0x39, 0xc0, 0x8e, - 0x2b, 0xff, 0x0c, 0xee, 0xc6, 0xb3, 0x9d, 0xbe, 0x65, 0x3a, 0x18, 0x3d, 0x07, 0xe4, 0x78, 0x4c, - 0x75, 0xc8, 0xb9, 0xa2, 0x50, 0xce, 0xec, 0xe6, 0xab, 0x2b, 0x15, 0xb2, 0x16, 0x57, 0x51, 0xd6, - 0x9c, 0x28, 0x88, 0xfc, 0x19, 0x2c, 0xf1, 0x67, 0xb4, 0x0e, 0x0b, 0x3d, 0xed, 0x97, 0x96, 0x2d, - 0x0a, 0x65, 0x61, 0xb7, 0xa0, 0xb0, 0x17, 0x4a, 0xd5, 0x4d, 0xcb, 0x16, 0xd3, 0x9c, 0x4a, 0x5e, - 0x08, 0xb5, 0xaf, 0xb9, 0x9d, 0xd7, 0x62, 0x86, 0x51, 0xe9, 0x8b, 0xfc, 0x09, 0xac, 0x1f, 0x62, - 0xf7, 0xd4, 0x18, 0x5c, 0xe9, 0x66, 0xd3, 0xbc, 0xb4, 0xb8, 0x07, 0xe8, 0x09, 0x2c, 0x71, 0xbb, - 0x28, 0x76, 0xd4, 0x2c, 0x8f, 0x29, 0xff, 0x43, 0x80, 0x8d, 0x08, 0x00, 0xf7, 0x11, 0x41, 0xd6, - 0xd4, 0x7a, 0x98, 0xaa, 0xe7, 0x14, 0xfa, 0x8c, 0x1e, 0xc3, 0xea, 0x10, 0x9b, 0x5d, 0xcb, 0xf6, - 0x9c, 0xa6, 0x26, 0xe6, 0x94, 0x02, 0xa3, 0x7a, 0x6e, 0xd5, 0x61, 0xb9, 0xa7, 0x99, 0xfa, 0x25, - 0x76, 0x5c, 0x31, 0x43, 0x83, 0xb2, 0x4b, 0x57, 0x8f, 0x5d, 0xa8, 0x72, 0xc2, 0x45, 0x1b, 0xa6, - 0x6b, 0x8f, 0x14, 0x5f, 0x53, 0x7a, 0x0e, 0x85, 0x10, 0x0b, 0x15, 0x21, 0xf3, 0x06, 0x8f, 0xb8, - 0x41, 0xe4, 0x91, 0xc4, 0x64, 0xa8, 0x19, 0x03, 0xcc, 0xcd, 0x60, 0x2f, 0xcf, 0xd2, 0x4f, 0x05, - 0xf9, 
0xbf, 0x19, 0x28, 0xd5, 0x6c, 0xac, 0xb9, 0xf8, 0xa5, 0x65, 0x0c, 0x7a, 0x78, 0xce, 0xb8, - 0xf8, 0xde, 0xa7, 0x03, 0xde, 0xef, 0xc1, 0x6a, 0x47, 0xeb, 0x6b, 0x1d, 0xdd, 0x1d, 0xa9, 0xb6, - 0x66, 0x5e, 0x61, 0x9a, 0x8a, 0x7c, 0x15, 0x51, 0x88, 0x1a, 0x67, 0x29, 0x84, 0xa3, 0x14, 0x3a, - 0xc1, 0x57, 0xf4, 0x29, 0x94, 0x86, 0xd4, 0x0e, 0x95, 0xd0, 0x2f, 0x74, 0x43, 0x77, 0x75, 0xec, - 0x88, 0x59, 0x1a, 0x9c, 0x0d, 0x66, 0x02, 0xe5, 0xd7, 0x3c, 0xf6, 0x48, 0x41, 0xc3, 0x30, 0x45, - 0xc7, 0x0e, 0x3a, 0x02, 0xe8, 0x6b, 0xb6, 0xd6, 0xc3, 0x2e, 0xb6, 0x1d, 0x71, 0x21, 0x10, 0xdb, - 0x18, 0x67, 0x2b, 0xa7, 0xbe, 0x28, 0x8b, 0x6d, 0x40, 0x17, 0xfd, 0x04, 0x8a, 0x03, 0x07, 0xdb, - 0x6a, 0xc7, 0xc6, 0x5d, 0x6c, 0xba, 0xba, 0x66, 0x38, 0xe2, 0x22, 0xc5, 0xfb, 0x20, 0x11, 0xef, - 0xdc, 0xc1, 0x76, 0x6d, 0x2c, 0xcf, 0x40, 0xef, 0x0c, 0xc2, 0x54, 0xe9, 0x63, 0xb8, 0x13, 0x59, - 0x78, 0x9e, 0xcc, 0x49, 0x07, 0xb0, 0x1e, 0xb7, 0xce, 0x5c, 0xd9, 0x3f, 0x82, 0xf5, 0xb0, 0xfd, - 0x7c, 0x4f, 0x7f, 0x0f, 0xf2, 0x3c, 0x0d, 0xba, 0x79, 0x69, 0xf1, 0x1d, 0x70, 0x27, 0x10, 0x7e, - 0xba, 0x31, 0x61, 0xe8, 0x3f, 0xcb, 0xbf, 0xcb, 0x42, 0x31, 0x9a, 0x19, 0xb4, 0x07, 0x0b, 0x17, - 0x86, 0xd5, 0x79, 0xc3, 0x01, 0x1e, 0xc6, 0xe6, 0xaf, 0x72, 0x40, 0x44, 0x18, 0xf5, 0x28, 0xa5, - 0x30, 0x0d, 0xa2, 0xda, 0xb3, 0x06, 0xa6, 0x4b, 0x6d, 0x4e, 0x54, 0x3d, 0x21, 0x22, 0x63, 0x55, - 0xaa, 0x81, 0xf6, 0x21, 0xaf, 0x75, 0x3a, 0xd8, 0x71, 0xd4, 0x9e, 0xd5, 0xf5, 0xf6, 0x5e, 0x39, - 0x1e, 0x60, 0x9f, 0x0a, 0x9e, 0x58, 0x5d, 0xac, 0x80, 0xe6, 0x3f, 0x4b, 0x05, 0xc8, 0x07, 0xac, - 0x92, 0x0e, 0x21, 0x1f, 0x58, 0x09, 0x6d, 0xc1, 0xd2, 0xa5, 0xa3, 0xba, 0xa3, 0xbe, 0x77, 0xe8, - 0x17, 0x2f, 0x9d, 0xf6, 0xa8, 0x8f, 0xd1, 0x03, 0xc8, 0x53, 0x13, 0xd4, 0x4b, 0x43, 0xbb, 0x72, - 0xc4, 0x74, 0x39, 0xb3, 0x9b, 0x53, 0x80, 0x92, 0x3e, 0x25, 0x14, 0xe9, 0xdf, 0x02, 0xc0, 0x78, - 0x49, 0xb4, 0x07, 0x59, 0x6a, 0x22, 0x41, 0x59, 0xad, 0x3e, 0xbe, 0xce, 0xc4, 0x0a, 0xb5, 0x93, - 0xaa, 0xc8, 0xbf, 0x17, 0x20, 0x4b, 0x31, 0xf2, 0xb0, 0x74, 0xde, 0xfa, 0xac, 0xf5, 0xe2, 0x55, - 0xab, 0x98, 0x42, 0x9b, 0x80, 0xce, 0x9a, 0xad, 0xc3, 0xe3, 0x86, 0xda, 0x7a, 0x51, 0x6f, 0xa8, - 0xaf, 0x94, 0x66, 0xbb, 0xa1, 0x14, 0x05, 0xb4, 0x03, 0x5b, 0x41, 0xba, 0xd2, 0xd8, 0xaf, 0x37, - 0x14, 0xf5, 0x45, 0xeb, 0xf8, 0xf3, 0x62, 0x1a, 0x49, 0xb0, 0x79, 0x72, 0x7e, 0xdc, 0x6e, 0x4e, - 0xf2, 0x32, 0xe8, 0x2e, 0x88, 0x01, 0x1e, 0xc7, 0xe0, 0xb0, 0x59, 0x02, 0x1b, 0xe0, 0xb2, 0x47, - 0xce, 0x5c, 0x38, 0x28, 0xf8, 0x69, 0x20, 0x91, 0x92, 0x5f, 0x41, 0x21, 0x74, 0xf2, 0x49, 0x8d, - 0xb4, 0xf1, 0x97, 0x03, 0xdd, 0xc6, 0x5d, 0xf5, 0x62, 0xe4, 0x62, 0x87, 0x86, 0x21, 0xab, 0x14, - 0x3c, 0xea, 0x01, 0x21, 0x92, 0x98, 0x1a, 0x7a, 0x4f, 0x77, 0xb9, 0x4c, 0x9a, 0xca, 0x00, 0x25, - 0x51, 0x01, 0xf9, 0x6f, 0x02, 0xc0, 0x78, 0x53, 0x12, 0x58, 0xbf, 0xf8, 0x84, 0x60, 0x3d, 0x2a, - 0x83, 0x5d, 0x85, 0xb4, 0xde, 0xe5, 0x07, 0x22, 0xad, 0x77, 0xd1, 0x0f, 0x01, 0x34, 0xd7, 0xb5, - 0xf5, 0x8b, 0x01, 0x51, 0x61, 0xc5, 0xf8, 0x41, 0x64, 0xc3, 0x57, 0xf6, 0x7d, 0x09, 0x5e, 0x27, - 0xc6, 0x2a, 0xe4, 0x34, 0x47, 0xd8, 0x73, 0x9d, 0xc4, 0xff, 0x08, 0x50, 0xaa, 0x63, 0x03, 0xdf, - 0xb4, 0x0e, 0xef, 0x40, 0xce, 0x3b, 0xb1, 0x9e, 0x5b, 0xcb, 0xfc, 0x78, 0x76, 0x63, 0x6b, 0x58, - 0x26, 0x50, 0xc3, 0x62, 0x16, 0x9e, 0xb1, 0x86, 0xdd, 0x46, 0x11, 0xda, 0x84, 0xf5, 0xb0, 0x01, - 0xac, 0x08, 0xc9, 0x7f, 0xc9, 0xc2, 0xfd, 0x9a, 0x65, 0xba, 0xb6, 0x65, 0x18, 0xd8, 0x3e, 0x1d, - 0x5c, 0x18, 0xba, 0xf3, 0xfa, 0x1d, 0x44, 0x67, 0x0b, 0x96, 0x4c, 0xab, 0x4b, 0x59, 0x19, 0x76, - 0x9c, 0xc9, 0x6b, 0xb3, 0x8b, 
0x0e, 0x60, 0x2d, 0xda, 0x8c, 0x46, 0x62, 0x96, 0xae, 0x93, 0xd0, - 0x8a, 0x8a, 0xc3, 0x68, 0x09, 0x94, 0x60, 0xd9, 0xc6, 0x5a, 0xd7, 0x32, 0x8d, 0x91, 0xb8, 0x50, - 0x16, 0x76, 0x97, 0x15, 0xff, 0x1d, 0x75, 0x12, 0x5b, 0xcb, 0x53, 0xd6, 0x5a, 0xa6, 0x3a, 0x3f, - 0x5b, 0x86, 0xd0, 0xa5, 0xef, 0x44, 0x60, 0x7f, 0x2f, 0xd1, 0x55, 0xf6, 0x66, 0x59, 0x85, 0xbd, - 0x45, 0x77, 0x3e, 0x77, 0x74, 0x4c, 0xbe, 0x8d, 0x9d, 0x20, 0xd5, 0x60, 0x23, 0x76, 0xb9, 0xb9, - 0xb6, 0xd3, 0xdf, 0x05, 0x78, 0x90, 0xe8, 0x13, 0xef, 0x6f, 0x6f, 0xa0, 0xd4, 0x67, 0x0c, 0x35, - 0xdc, 0xe7, 0x48, 0x58, 0x9e, 0x4f, 0x0f, 0x0b, 0x9f, 0xc6, 0x42, 0x54, 0x52, 0x1d, 0x58, 0x60, - 0xd6, 0xfa, 0x51, 0xba, 0x54, 0x87, 0xcd, 0x78, 0xe1, 0xb9, 0xdc, 0xfa, 0x53, 0x1a, 0xca, 0x63, - 0x9b, 0xce, 0xcd, 0xfe, 0x37, 0x7f, 0x1e, 0x70, 0xcc, 0x7e, 0x65, 0x93, 0xd9, 0xb3, 0x48, 0xc8, - 0xe2, 0xcd, 0xfb, 0x06, 0x6b, 0xca, 0x23, 0x78, 0x38, 0xc5, 0x1a, 0x5e, 0x60, 0xfe, 0x95, 0x86, - 0x87, 0x2f, 0x35, 0x43, 0xef, 0xfa, 0x03, 0x50, 0x70, 0x86, 0xbc, 0xd5, 0x98, 0x26, 0xcc, 0xb5, - 0x99, 0x79, 0xe7, 0x5a, 0x3d, 0xee, 0x34, 0xb3, 0x1c, 0x7c, 0xc4, 0x50, 0xae, 0xf3, 0x67, 0xe6, - 0x03, 0x7d, 0x2b, 0x87, 0xf1, 0xe7, 0x20, 0x4f, 0xb3, 0x88, 0x1f, 0xc7, 0xbb, 0x90, 0xf3, 0xaf, - 0x7f, 0x14, 0x77, 0x59, 0x19, 0x13, 0x90, 0x08, 0x4b, 0x3d, 0xec, 0x38, 0xda, 0x95, 0x87, 0xef, - 0xbd, 0xca, 0xbf, 0x12, 0x00, 0x1d, 0xeb, 0x0e, 0x9f, 0xcb, 0xe6, 0xce, 0x18, 0x19, 0xd7, 0xb4, - 0xb7, 0x2a, 0x36, 0x5d, 0x5b, 0xe7, 0xa3, 0x45, 0x41, 0x81, 0x9e, 0xf6, 0xb6, 0xc1, 0x28, 0x64, - 0x96, 0x70, 0x5c, 0xcd, 0x76, 0x75, 0xf3, 0x4a, 0x75, 0xad, 0x37, 0xd8, 0xe4, 0x07, 0xa2, 0xe0, - 0x51, 0xdb, 0x84, 0x28, 0xff, 0x51, 0x80, 0x52, 0xc8, 0x0c, 0xee, 0xd6, 0x53, 0x58, 0xf2, 0xb0, - 0x59, 0x65, 0xb9, 0x4f, 0xed, 0x88, 0x11, 0xad, 0xb0, 0x24, 0x78, 0xe2, 0xe8, 0x1e, 0x80, 0x89, - 0xdf, 0xba, 0x7c, 0x51, 0xe6, 0x75, 0x8e, 0x50, 0xe8, 0x82, 0xd2, 0x1e, 0x2c, 0xb0, 0x54, 0xcc, - 0x3f, 0xa7, 0xff, 0x3a, 0x0d, 0xe8, 0x10, 0xbb, 0xfe, 0x28, 0x36, 0x67, 0xc8, 0x12, 0xf6, 0x71, - 0x7a, 0xde, 0x7d, 0x7c, 0x18, 0xba, 0x9f, 0xb1, 0x63, 0xf0, 0xbe, 0x77, 0xf7, 0x8d, 0x18, 0x37, - 0xed, 0x7a, 0xf6, 0x35, 0x2f, 0x51, 0x72, 0x1d, 0x4a, 0xa1, 0x05, 0x79, 0xe6, 0x3e, 0x00, 0xa4, - 0x0d, 0x35, 0xdd, 0xd0, 0x2e, 0x0c, 0xe6, 0x29, 0xe1, 0xf2, 0x41, 0x72, 0xcd, 0xe7, 0x78, 0x6a, - 0xf2, 0x8f, 0x60, 0x33, 0xd0, 0x2e, 0x6c, 0xeb, 0x62, 0xde, 0x82, 0x2c, 0x6f, 0xc3, 0xd6, 0x04, - 0x02, 0xaf, 0x52, 0x3f, 0x0e, 0xd6, 0x7d, 0x6e, 0xec, 0x0d, 0x6b, 0x94, 0xac, 0x07, 0xcb, 0xe2, - 0x04, 0x16, 0x77, 0xbe, 0x0e, 0x2b, 0x31, 0xc9, 0x2d, 0x47, 0x4a, 0xfc, 0x19, 0xb6, 0x87, 0x7a, - 0x27, 0x98, 0xe7, 0x90, 0x96, 0xfc, 0xdb, 0x34, 0xec, 0x4c, 0x91, 0x46, 0x4f, 0x21, 0x63, 0xf7, - 0x3b, 0xdc, 0xdc, 0xf7, 0xae, 0x03, 0xaf, 0x28, 0xa7, 0xb5, 0xa3, 0x94, 0x42, 0x54, 0xa4, 0xbf, - 0x0a, 0x90, 0x51, 0x4e, 0x6b, 0xe8, 0x63, 0xc8, 0xfa, 0x77, 0xb0, 0xd5, 0xea, 0x77, 0x66, 0x81, - 0xa8, 0x90, 0x6b, 0x9a, 0x42, 0xd5, 0x64, 0x0b, 0xb2, 0xf4, 0xd2, 0x16, 0xba, 0x40, 0x89, 0xb0, - 0x5e, 0x53, 0x1a, 0xfb, 0xed, 0x86, 0x5a, 0x6f, 0x1c, 0x37, 0xda, 0x0d, 0xf5, 0xe5, 0x8b, 0xe3, - 0xf3, 0x93, 0x46, 0x51, 0x20, 0x37, 0xa1, 0xd3, 0xf3, 0x83, 0xe3, 0xe6, 0xd9, 0x91, 0x7a, 0xde, - 0xf2, 0x9e, 0x38, 0x37, 0x8d, 0x8a, 0xb0, 0x72, 0xdc, 0x3c, 0x6b, 0x73, 0xc2, 0x59, 0x31, 0x43, - 0x28, 0x87, 0x8d, 0xb6, 0x5a, 0xdb, 0x3f, 0xdd, 0xaf, 0x35, 0xdb, 0x9f, 0x17, 0xb3, 0x07, 0x8b, - 0xcc, 0x5e, 0xf9, 0x9f, 0x0b, 0x20, 0xb6, 0xac, 0x2e, 0x7e, 0x77, 0x13, 0x6d, 0x37, 0x7e, 0xbc, - 0x61, 0xc7, 0xec, 0xfb, 0x14, 0x30, 0xc9, 0x80, 0xd9, 
0xe7, 0x1a, 0x52, 0x3e, 0x5d, 0xcd, 0xbe, - 0xc2, 0xae, 0xda, 0xd7, 0xdc, 0xd7, 0x74, 0x30, 0xce, 0x29, 0xc0, 0x48, 0xa7, 0x9a, 0xfb, 0x3a, - 0x7e, 0x7e, 0x5e, 0xb8, 0xf9, 0xfc, 0xbc, 0x18, 0x99, 0x9f, 0xbf, 0x88, 0x99, 0x47, 0xd8, 0x64, - 0x5b, 0x9d, 0xee, 0xe3, 0x6c, 0x93, 0xf3, 0x2f, 0xe2, 0x7a, 0xed, 0x32, 0xc5, 0xff, 0x70, 0x3a, - 0xfe, 0xac, 0x2d, 0xf6, 0x56, 0x26, 0xc3, 0x6f, 0xcf, 0xe4, 0xbd, 0x03, 0xdb, 0x31, 0x21, 0xe1, - 0x65, 0xec, 0xab, 0x34, 0x48, 0x84, 0xfb, 0x2e, 0x27, 0xd7, 0xc8, 0x8e, 0xcc, 0x4c, 0xec, 0x48, - 0x35, 0x71, 0x82, 0x1d, 0x9f, 0x8a, 0xff, 0xfb, 0xec, 0x7a, 0x0f, 0x76, 0x62, 0xed, 0xe0, 0x81, - 0x7c, 0x06, 0xc5, 0x43, 0xec, 0x12, 0x89, 0x66, 0x7d, 0xde, 0xfa, 0xff, 0x5d, 0x58, 0x0b, 0xe8, - 0xf2, 0x7a, 0x1f, 0x98, 0xf7, 0x85, 0xe0, 0xbc, 0x4f, 0x56, 0xa2, 0xf9, 0xbc, 0x49, 0x43, 0x2b, - 0xc1, 0x5a, 0x40, 0x97, 0x9b, 0x5e, 0x67, 0x5b, 0xe0, 0x6b, 0x36, 0xb1, 0x2f, 0x58, 0x7c, 0x92, - 0xda, 0xd7, 0x27, 0x91, 0xf6, 0xc5, 0x46, 0x2f, 0xc9, 0xcf, 0xef, 0x75, 0x8d, 0xeb, 0x0f, 0x02, - 0x6c, 0xc4, 0xca, 0xa1, 0x6a, 0xb0, 0x65, 0xdd, 0x4f, 0x06, 0x0c, 0x36, 0xab, 0x33, 0xd6, 0xab, - 0x7e, 0x10, 0xea, 0x55, 0x8f, 0xa6, 0xeb, 0x06, 0xbb, 0x54, 0x29, 0xa6, 0x4b, 0x79, 0x9d, 0xa4, - 0xfa, 0x67, 0x01, 0x96, 0x9b, 0x74, 0xa3, 0xb9, 0xa4, 0x1a, 0xae, 0xc7, 0xfd, 0x8b, 0x41, 0x65, - 0x6f, 0xac, 0x4a, 0xfa, 0x8b, 0x23, 0x3d, 0x9c, 0x22, 0xc1, 0x33, 0x97, 0x42, 0x47, 0x50, 0x08, - 0xfd, 0x96, 0x40, 0xdb, 0x71, 0xbf, 0x2a, 0x18, 0xa0, 0x94, 0xfc, 0x17, 0x43, 0x4e, 0x55, 0xbf, - 0x5a, 0x04, 0x18, 0xf7, 0x69, 0xd4, 0x80, 0x95, 0xe0, 0x37, 0x68, 0x24, 0x26, 0x7d, 0x56, 0x97, - 0xb6, 0x63, 0x38, 0xbe, 0x7d, 0x0d, 0x58, 0x09, 0x7e, 0x45, 0xe2, 0x30, 0x31, 0x5f, 0xb6, 0x38, - 0x4c, 0xec, 0x27, 0xa7, 0x14, 0xba, 0x0c, 0x0d, 0x62, 0xc1, 0x03, 0x88, 0x1e, 0xcd, 0xf0, 0xb9, - 0x44, 0x7a, 0x6f, 0x96, 0x8f, 0x07, 0x72, 0x0a, 0x19, 0xb0, 0x9d, 0x78, 0x41, 0x45, 0x8f, 0x67, - 0xba, 0x4e, 0x4b, 0x4f, 0xae, 0x13, 0xf3, 0x57, 0xb3, 0x40, 0x4a, 0xbe, 0x86, 0xa1, 0x27, 0xb3, - 0xdd, 0x1c, 0xa5, 0xf7, 0xaf, 0x95, 0xf3, 0x17, 0x3c, 0x80, 0x7c, 0xe0, 0x9a, 0x83, 0xb6, 0x26, - 0x2f, 0x3e, 0x0c, 0x52, 0x4c, 0xba, 0x11, 0x31, 0x8c, 0xc0, 0x6c, 0xce, 0x31, 0x26, 0xaf, 0x07, - 0x1c, 0x23, 0x66, 0x8c, 0x97, 0x53, 0xa8, 0x05, 0x77, 0x22, 0x73, 0x35, 0xda, 0x89, 0x66, 0x28, - 0x50, 0xde, 0xa4, 0xbb, 0xf1, 0xcc, 0xf8, 0xb4, 0x45, 0x2a, 0xd0, 0x44, 0xda, 0xe2, 0xeb, 0xdc, - 0x44, 0xda, 0x12, 0x0a, 0x99, 0x9c, 0xaa, 0xfe, 0x26, 0x03, 0x59, 0x52, 0x25, 0x50, 0x9b, 0x57, - 0xd3, 0xd0, 0x2e, 0xb9, 0x37, 0x75, 0x08, 0x91, 0xee, 0x27, 0xb1, 0x7d, 0x67, 0x7e, 0x0a, 0xa5, - 0x98, 0x46, 0x83, 0x1e, 0x5c, 0xd3, 0x0a, 0xa5, 0x72, 0xb2, 0x80, 0x8f, 0xfd, 0x11, 0xe4, 0xfc, - 0x4e, 0x83, 0x36, 0xbc, 0x0c, 0x85, 0xba, 0x96, 0xb4, 0x19, 0x25, 0x07, 0xb5, 0xfd, 0xee, 0xc1, - 0xb5, 0xa3, 0x9d, 0x88, 0x6b, 0x4f, 0x36, 0x19, 0xdf, 0xaf, 0x68, 0x7a, 0xc6, 0x7e, 0x25, 0x24, - 0xa6, 0x9c, 0x2c, 0xe0, 0x61, 0x5f, 0x2c, 0xd2, 0x9f, 0xe3, 0x1f, 0xfe, 0x2f, 0x00, 0x00, 0xff, - 0xff, 0x17, 0x25, 0x65, 0xbe, 0x29, 0x1f, 0x00, 0x00, -} diff --git a/vendor/github.com/container-storage-interface/spec/lib/go/csi/v0/csi.pb.go b/vendor/github.com/container-storage-interface/spec/lib/go/csi/v0/csi.pb.go new file mode 100644 index 000000000000..969362ff47d3 --- /dev/null +++ b/vendor/github.com/container-storage-interface/spec/lib/go/csi/v0/csi.pb.go @@ -0,0 +1,2671 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: csi.proto + +/* +Package csi is a generated protocol buffer package. 
+ +It is generated from these files: + csi.proto + +It has these top-level messages: + GetPluginInfoRequest + GetPluginInfoResponse + GetPluginCapabilitiesRequest + GetPluginCapabilitiesResponse + PluginCapability + ProbeRequest + ProbeResponse + CreateVolumeRequest + CreateVolumeResponse + VolumeCapability + CapacityRange + Volume + DeleteVolumeRequest + DeleteVolumeResponse + ControllerPublishVolumeRequest + ControllerPublishVolumeResponse + ControllerUnpublishVolumeRequest + ControllerUnpublishVolumeResponse + ValidateVolumeCapabilitiesRequest + ValidateVolumeCapabilitiesResponse + ListVolumesRequest + ListVolumesResponse + GetCapacityRequest + GetCapacityResponse + ControllerGetCapabilitiesRequest + ControllerGetCapabilitiesResponse + ControllerServiceCapability + NodeStageVolumeRequest + NodeStageVolumeResponse + NodeUnstageVolumeRequest + NodeUnstageVolumeResponse + NodePublishVolumeRequest + NodePublishVolumeResponse + NodeUnpublishVolumeRequest + NodeUnpublishVolumeResponse + NodeGetIdRequest + NodeGetIdResponse + NodeGetCapabilitiesRequest + NodeGetCapabilitiesResponse + NodeServiceCapability +*/ +package csi + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type PluginCapability_Service_Type int32 + +const ( + PluginCapability_Service_UNKNOWN PluginCapability_Service_Type = 0 + // CONTROLLER_SERVICE indicates that the Plugin provides RPCs for + // the ControllerService. Plugins SHOULD provide this capability. + // In rare cases certain plugins may wish to omit the + // ControllerService entirely from their implementation, but such + // SHOULD NOT be the common case. + // The presence of this capability determines whether the CO will + // attempt to invoke the REQUIRED ControllerService RPCs, as well + // as specific RPCs as indicated by ControllerGetCapabilities. + PluginCapability_Service_CONTROLLER_SERVICE PluginCapability_Service_Type = 1 +) + +var PluginCapability_Service_Type_name = map[int32]string{ + 0: "UNKNOWN", + 1: "CONTROLLER_SERVICE", +} +var PluginCapability_Service_Type_value = map[string]int32{ + "UNKNOWN": 0, + "CONTROLLER_SERVICE": 1, +} + +func (x PluginCapability_Service_Type) String() string { + return proto.EnumName(PluginCapability_Service_Type_name, int32(x)) +} +func (PluginCapability_Service_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{4, 0, 0} +} + +type VolumeCapability_AccessMode_Mode int32 + +const ( + VolumeCapability_AccessMode_UNKNOWN VolumeCapability_AccessMode_Mode = 0 + // Can only be published once as read/write on a single node, at + // any given time. + VolumeCapability_AccessMode_SINGLE_NODE_WRITER VolumeCapability_AccessMode_Mode = 1 + // Can only be published once as readonly on a single node, at + // any given time. + VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY VolumeCapability_AccessMode_Mode = 2 + // Can be published as readonly at multiple nodes simultaneously. 
+ VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY VolumeCapability_AccessMode_Mode = 3 + // Can be published at multiple nodes simultaneously. Only one of + // the node can be used as read/write. The rest will be readonly. + VolumeCapability_AccessMode_MULTI_NODE_SINGLE_WRITER VolumeCapability_AccessMode_Mode = 4 + // Can be published as read/write at multiple nodes + // simultaneously. + VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER VolumeCapability_AccessMode_Mode = 5 +) + +var VolumeCapability_AccessMode_Mode_name = map[int32]string{ + 0: "UNKNOWN", + 1: "SINGLE_NODE_WRITER", + 2: "SINGLE_NODE_READER_ONLY", + 3: "MULTI_NODE_READER_ONLY", + 4: "MULTI_NODE_SINGLE_WRITER", + 5: "MULTI_NODE_MULTI_WRITER", +} +var VolumeCapability_AccessMode_Mode_value = map[string]int32{ + "UNKNOWN": 0, + "SINGLE_NODE_WRITER": 1, + "SINGLE_NODE_READER_ONLY": 2, + "MULTI_NODE_READER_ONLY": 3, + "MULTI_NODE_SINGLE_WRITER": 4, + "MULTI_NODE_MULTI_WRITER": 5, +} + +func (x VolumeCapability_AccessMode_Mode) String() string { + return proto.EnumName(VolumeCapability_AccessMode_Mode_name, int32(x)) +} +func (VolumeCapability_AccessMode_Mode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{9, 2, 0} +} + +type ControllerServiceCapability_RPC_Type int32 + +const ( + ControllerServiceCapability_RPC_UNKNOWN ControllerServiceCapability_RPC_Type = 0 + ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME ControllerServiceCapability_RPC_Type = 1 + ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME ControllerServiceCapability_RPC_Type = 2 + ControllerServiceCapability_RPC_LIST_VOLUMES ControllerServiceCapability_RPC_Type = 3 + ControllerServiceCapability_RPC_GET_CAPACITY ControllerServiceCapability_RPC_Type = 4 +) + +var ControllerServiceCapability_RPC_Type_name = map[int32]string{ + 0: "UNKNOWN", + 1: "CREATE_DELETE_VOLUME", + 2: "PUBLISH_UNPUBLISH_VOLUME", + 3: "LIST_VOLUMES", + 4: "GET_CAPACITY", +} +var ControllerServiceCapability_RPC_Type_value = map[string]int32{ + "UNKNOWN": 0, + "CREATE_DELETE_VOLUME": 1, + "PUBLISH_UNPUBLISH_VOLUME": 2, + "LIST_VOLUMES": 3, + "GET_CAPACITY": 4, +} + +func (x ControllerServiceCapability_RPC_Type) String() string { + return proto.EnumName(ControllerServiceCapability_RPC_Type_name, int32(x)) +} +func (ControllerServiceCapability_RPC_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{26, 0, 0} +} + +type NodeServiceCapability_RPC_Type int32 + +const ( + NodeServiceCapability_RPC_UNKNOWN NodeServiceCapability_RPC_Type = 0 + NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME NodeServiceCapability_RPC_Type = 1 +) + +var NodeServiceCapability_RPC_Type_name = map[int32]string{ + 0: "UNKNOWN", + 1: "STAGE_UNSTAGE_VOLUME", +} +var NodeServiceCapability_RPC_Type_value = map[string]int32{ + "UNKNOWN": 0, + "STAGE_UNSTAGE_VOLUME": 1, +} + +func (x NodeServiceCapability_RPC_Type) String() string { + return proto.EnumName(NodeServiceCapability_RPC_Type_name, int32(x)) +} +func (NodeServiceCapability_RPC_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{39, 0, 0} +} + +type GetPluginInfoRequest struct { +} + +func (m *GetPluginInfoRequest) Reset() { *m = GetPluginInfoRequest{} } +func (m *GetPluginInfoRequest) String() string { return proto.CompactTextString(m) } +func (*GetPluginInfoRequest) ProtoMessage() {} +func (*GetPluginInfoRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type GetPluginInfoResponse struct { + // The name MUST follow reverse domain name notation format + // 
(https://en.wikipedia.org/wiki/Reverse_domain_name_notation). + // It SHOULD include the plugin's host company name and the plugin + // name, to minimize the possibility of collisions. It MUST be 63 + // characters or less, beginning and ending with an alphanumeric + // character ([a-z0-9A-Z]) with dashes (-), underscores (_), + // dots (.), and alphanumerics between. This field is REQUIRED. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // This field is REQUIRED. Value of this field is opaque to the CO. + VendorVersion string `protobuf:"bytes,2,opt,name=vendor_version,json=vendorVersion" json:"vendor_version,omitempty"` + // This field is OPTIONAL. Values are opaque to the CO. + Manifest map[string]string `protobuf:"bytes,3,rep,name=manifest" json:"manifest,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *GetPluginInfoResponse) Reset() { *m = GetPluginInfoResponse{} } +func (m *GetPluginInfoResponse) String() string { return proto.CompactTextString(m) } +func (*GetPluginInfoResponse) ProtoMessage() {} +func (*GetPluginInfoResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *GetPluginInfoResponse) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *GetPluginInfoResponse) GetVendorVersion() string { + if m != nil { + return m.VendorVersion + } + return "" +} + +func (m *GetPluginInfoResponse) GetManifest() map[string]string { + if m != nil { + return m.Manifest + } + return nil +} + +type GetPluginCapabilitiesRequest struct { +} + +func (m *GetPluginCapabilitiesRequest) Reset() { *m = GetPluginCapabilitiesRequest{} } +func (m *GetPluginCapabilitiesRequest) String() string { return proto.CompactTextString(m) } +func (*GetPluginCapabilitiesRequest) ProtoMessage() {} +func (*GetPluginCapabilitiesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +type GetPluginCapabilitiesResponse struct { + // All the capabilities that the controller service supports. This + // field is OPTIONAL. + Capabilities []*PluginCapability `protobuf:"bytes,2,rep,name=capabilities" json:"capabilities,omitempty"` +} + +func (m *GetPluginCapabilitiesResponse) Reset() { *m = GetPluginCapabilitiesResponse{} } +func (m *GetPluginCapabilitiesResponse) String() string { return proto.CompactTextString(m) } +func (*GetPluginCapabilitiesResponse) ProtoMessage() {} +func (*GetPluginCapabilitiesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *GetPluginCapabilitiesResponse) GetCapabilities() []*PluginCapability { + if m != nil { + return m.Capabilities + } + return nil +} + +// Specifies a capability of the plugin. 
+type PluginCapability struct { + // Types that are valid to be assigned to Type: + // *PluginCapability_Service_ + Type isPluginCapability_Type `protobuf_oneof:"type"` +} + +func (m *PluginCapability) Reset() { *m = PluginCapability{} } +func (m *PluginCapability) String() string { return proto.CompactTextString(m) } +func (*PluginCapability) ProtoMessage() {} +func (*PluginCapability) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +type isPluginCapability_Type interface { + isPluginCapability_Type() +} + +type PluginCapability_Service_ struct { + Service *PluginCapability_Service `protobuf:"bytes,1,opt,name=service,oneof"` +} + +func (*PluginCapability_Service_) isPluginCapability_Type() {} + +func (m *PluginCapability) GetType() isPluginCapability_Type { + if m != nil { + return m.Type + } + return nil +} + +func (m *PluginCapability) GetService() *PluginCapability_Service { + if x, ok := m.GetType().(*PluginCapability_Service_); ok { + return x.Service + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*PluginCapability) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _PluginCapability_OneofMarshaler, _PluginCapability_OneofUnmarshaler, _PluginCapability_OneofSizer, []interface{}{ + (*PluginCapability_Service_)(nil), + } +} + +func _PluginCapability_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*PluginCapability) + // type + switch x := m.Type.(type) { + case *PluginCapability_Service_: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Service); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("PluginCapability.Type has unexpected type %T", x) + } + return nil +} + +func _PluginCapability_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*PluginCapability) + switch tag { + case 1: // type.service + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(PluginCapability_Service) + err := b.DecodeMessage(msg) + m.Type = &PluginCapability_Service_{msg} + return true, err + default: + return false, nil + } +} + +func _PluginCapability_OneofSizer(msg proto.Message) (n int) { + m := msg.(*PluginCapability) + // type + switch x := m.Type.(type) { + case *PluginCapability_Service_: + s := proto.Size(x.Service) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type PluginCapability_Service struct { + Type PluginCapability_Service_Type `protobuf:"varint,1,opt,name=type,enum=csi.v0.PluginCapability_Service_Type" json:"type,omitempty"` +} + +func (m *PluginCapability_Service) Reset() { *m = PluginCapability_Service{} } +func (m *PluginCapability_Service) String() string { return proto.CompactTextString(m) } +func (*PluginCapability_Service) ProtoMessage() {} +func (*PluginCapability_Service) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4, 0} } + +func (m *PluginCapability_Service) GetType() PluginCapability_Service_Type { + if m != nil { + return m.Type + } + return PluginCapability_Service_UNKNOWN +} + +type ProbeRequest struct { +} + +func (m *ProbeRequest) Reset() { *m = ProbeRequest{} } +func (m *ProbeRequest) String() string { return proto.CompactTextString(m) } +func 
(*ProbeRequest) ProtoMessage() {} +func (*ProbeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +type ProbeResponse struct { +} + +func (m *ProbeResponse) Reset() { *m = ProbeResponse{} } +func (m *ProbeResponse) String() string { return proto.CompactTextString(m) } +func (*ProbeResponse) ProtoMessage() {} +func (*ProbeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +type CreateVolumeRequest struct { + // The suggested name for the storage space. This field is REQUIRED. + // It serves two purposes: + // 1) Idempotency - This name is generated by the CO to achieve + // idempotency. If `CreateVolume` fails, the volume may or may not + // be provisioned. In this case, the CO may call `CreateVolume` + // again, with the same name, to ensure the volume exists. The + // Plugin should ensure that multiple `CreateVolume` calls for the + // same name do not result in more than one piece of storage + // provisioned corresponding to that name. If a Plugin is unable to + // enforce idempotency, the CO's error recovery logic could result + // in multiple (unused) volumes being provisioned. + // 2) Suggested name - Some storage systems allow callers to specify + // an identifier by which to refer to the newly provisioned + // storage. If a storage system supports this, it can optionally + // use this name as the identifier for the new volume. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // This field is OPTIONAL. This allows the CO to specify the capacity + // requirement of the volume to be provisioned. If not specified, the + // Plugin MAY choose an implementation-defined capacity range. + CapacityRange *CapacityRange `protobuf:"bytes,2,opt,name=capacity_range,json=capacityRange" json:"capacity_range,omitempty"` + // The capabilities that the provisioned volume MUST have: the Plugin + // MUST provision a volume that could satisfy ALL of the + // capabilities specified in this list. The Plugin MUST assume that + // the CO MAY use the provisioned volume later with ANY of the + // capabilities specified in this list. This also enables the CO to do + // early validation: if ANY of the specified volume capabilities are + // not supported by the Plugin, the call SHALL fail. This field is + // REQUIRED. + VolumeCapabilities []*VolumeCapability `protobuf:"bytes,3,rep,name=volume_capabilities,json=volumeCapabilities" json:"volume_capabilities,omitempty"` + // Plugin specific parameters passed in as opaque key-value pairs. + // This field is OPTIONAL. The Plugin is responsible for parsing and + // validating these parameters. COs will treat these as opaque. + Parameters map[string]string `protobuf:"bytes,4,rep,name=parameters" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Secrets required by plugin to complete volume creation request. + // A secret is a string to string map where the key identifies the + // name of the secret (e.g. "username" or "password"), and the value + // contains the secret data (e.g. "bob" or "abc123"). + // Each key MUST consist of alphanumeric characters, '-', '_' or '.'. + // Each value MUST contain a valid string. An SP MAY choose to accept + // binary (non-string) data by using a binary-to-text encoding scheme, + // like base64. + // An SP SHALL advertise the requirements for required secret keys and + // values in documentation. + // CO SHALL permit passing through the required secrets. 
+ // A CO MAY pass the same secrets to all RPCs, therefore the keys for + // all unique secrets that an SP expects must be unique across all CSI + // operations. + // This information is sensitive and MUST be treated as such (not + // logged, etc.) by the CO. + // This field is OPTIONAL. + ControllerCreateSecrets map[string]string `protobuf:"bytes,5,rep,name=controller_create_secrets,json=controllerCreateSecrets" json:"controller_create_secrets,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *CreateVolumeRequest) Reset() { *m = CreateVolumeRequest{} } +func (m *CreateVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*CreateVolumeRequest) ProtoMessage() {} +func (*CreateVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *CreateVolumeRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *CreateVolumeRequest) GetCapacityRange() *CapacityRange { + if m != nil { + return m.CapacityRange + } + return nil +} + +func (m *CreateVolumeRequest) GetVolumeCapabilities() []*VolumeCapability { + if m != nil { + return m.VolumeCapabilities + } + return nil +} + +func (m *CreateVolumeRequest) GetParameters() map[string]string { + if m != nil { + return m.Parameters + } + return nil +} + +func (m *CreateVolumeRequest) GetControllerCreateSecrets() map[string]string { + if m != nil { + return m.ControllerCreateSecrets + } + return nil +} + +type CreateVolumeResponse struct { + // Contains all attributes of the newly created volume that are + // relevant to the CO along with information required by the Plugin + // to uniquely identify the volume. This field is REQUIRED. + Volume *Volume `protobuf:"bytes,1,opt,name=volume" json:"volume,omitempty"` +} + +func (m *CreateVolumeResponse) Reset() { *m = CreateVolumeResponse{} } +func (m *CreateVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*CreateVolumeResponse) ProtoMessage() {} +func (*CreateVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *CreateVolumeResponse) GetVolume() *Volume { + if m != nil { + return m.Volume + } + return nil +} + +// Specify a capability of a volume. +type VolumeCapability struct { + // Specifies what API the volume will be accessed using. One of the + // following fields MUST be specified. + // + // Types that are valid to be assigned to AccessType: + // *VolumeCapability_Block + // *VolumeCapability_Mount + AccessType isVolumeCapability_AccessType `protobuf_oneof:"access_type"` + // This is a REQUIRED field. 
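+	// Illustrative sketch (not part of the generated API): a CO asking
+	// for a single-writer mount volume, with "ext4" as an assumed
+	// filesystem type, might build this message as:
+	//
+	//	&VolumeCapability{
+	//		AccessType: &VolumeCapability_Mount{
+	//			Mount: &VolumeCapability_MountVolume{FsType: "ext4"},
+	//		},
+	//		AccessMode: &VolumeCapability_AccessMode{
+	//			Mode: VolumeCapability_AccessMode_SINGLE_NODE_WRITER,
+	//		},
+	//	}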
+ AccessMode *VolumeCapability_AccessMode `protobuf:"bytes,3,opt,name=access_mode,json=accessMode" json:"access_mode,omitempty"` +} + +func (m *VolumeCapability) Reset() { *m = VolumeCapability{} } +func (m *VolumeCapability) String() string { return proto.CompactTextString(m) } +func (*VolumeCapability) ProtoMessage() {} +func (*VolumeCapability) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +type isVolumeCapability_AccessType interface { + isVolumeCapability_AccessType() +} + +type VolumeCapability_Block struct { + Block *VolumeCapability_BlockVolume `protobuf:"bytes,1,opt,name=block,oneof"` +} +type VolumeCapability_Mount struct { + Mount *VolumeCapability_MountVolume `protobuf:"bytes,2,opt,name=mount,oneof"` +} + +func (*VolumeCapability_Block) isVolumeCapability_AccessType() {} +func (*VolumeCapability_Mount) isVolumeCapability_AccessType() {} + +func (m *VolumeCapability) GetAccessType() isVolumeCapability_AccessType { + if m != nil { + return m.AccessType + } + return nil +} + +func (m *VolumeCapability) GetBlock() *VolumeCapability_BlockVolume { + if x, ok := m.GetAccessType().(*VolumeCapability_Block); ok { + return x.Block + } + return nil +} + +func (m *VolumeCapability) GetMount() *VolumeCapability_MountVolume { + if x, ok := m.GetAccessType().(*VolumeCapability_Mount); ok { + return x.Mount + } + return nil +} + +func (m *VolumeCapability) GetAccessMode() *VolumeCapability_AccessMode { + if m != nil { + return m.AccessMode + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*VolumeCapability) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _VolumeCapability_OneofMarshaler, _VolumeCapability_OneofUnmarshaler, _VolumeCapability_OneofSizer, []interface{}{ + (*VolumeCapability_Block)(nil), + (*VolumeCapability_Mount)(nil), + } +} + +func _VolumeCapability_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*VolumeCapability) + // access_type + switch x := m.AccessType.(type) { + case *VolumeCapability_Block: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Block); err != nil { + return err + } + case *VolumeCapability_Mount: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Mount); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("VolumeCapability.AccessType has unexpected type %T", x) + } + return nil +} + +func _VolumeCapability_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*VolumeCapability) + switch tag { + case 1: // access_type.block + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(VolumeCapability_BlockVolume) + err := b.DecodeMessage(msg) + m.AccessType = &VolumeCapability_Block{msg} + return true, err + case 2: // access_type.mount + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(VolumeCapability_MountVolume) + err := b.DecodeMessage(msg) + m.AccessType = &VolumeCapability_Mount{msg} + return true, err + default: + return false, nil + } +} + +func _VolumeCapability_OneofSizer(msg proto.Message) (n int) { + m := msg.(*VolumeCapability) + // access_type + switch x := m.AccessType.(type) { + case *VolumeCapability_Block: + s := proto.Size(x.Block) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case 
*VolumeCapability_Mount:
+		s := proto.Size(x.Mount)
+		n += proto.SizeVarint(2<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case nil:
+	default:
+		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+	}
+	return n
+}
+
+// Indicate that the volume will be accessed via the block device API.
+type VolumeCapability_BlockVolume struct {
+}
+
+func (m *VolumeCapability_BlockVolume) Reset() { *m = VolumeCapability_BlockVolume{} }
+func (m *VolumeCapability_BlockVolume) String() string { return proto.CompactTextString(m) }
+func (*VolumeCapability_BlockVolume) ProtoMessage() {}
+func (*VolumeCapability_BlockVolume) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9, 0} }
+
+// Indicate that the volume will be accessed via the filesystem API.
+type VolumeCapability_MountVolume struct {
+	// The filesystem type. This field is OPTIONAL.
+	// An empty string is equal to an unspecified field value.
+	FsType string `protobuf:"bytes,1,opt,name=fs_type,json=fsType" json:"fs_type,omitempty"`
+	// The mount options that can be used for the volume. This field is
+	// OPTIONAL. `mount_flags` MAY contain sensitive information.
+	// Therefore, the CO and the Plugin MUST NOT leak this information
+	// to untrusted entities. The total size of this repeated field
+	// SHALL NOT exceed 4 KiB.
+	MountFlags []string `protobuf:"bytes,2,rep,name=mount_flags,json=mountFlags" json:"mount_flags,omitempty"`
+}
+
+func (m *VolumeCapability_MountVolume) Reset() { *m = VolumeCapability_MountVolume{} }
+func (m *VolumeCapability_MountVolume) String() string { return proto.CompactTextString(m) }
+func (*VolumeCapability_MountVolume) ProtoMessage() {}
+func (*VolumeCapability_MountVolume) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9, 1} }
+
+func (m *VolumeCapability_MountVolume) GetFsType() string {
+	if m != nil {
+		return m.FsType
+	}
+	return ""
+}
+
+func (m *VolumeCapability_MountVolume) GetMountFlags() []string {
+	if m != nil {
+		return m.MountFlags
+	}
+	return nil
+}
+
+// Specify how a volume can be accessed.
+type VolumeCapability_AccessMode struct {
+	// This field is REQUIRED.
+	Mode VolumeCapability_AccessMode_Mode `protobuf:"varint,1,opt,name=mode,enum=csi.v0.VolumeCapability_AccessMode_Mode" json:"mode,omitempty"`
+}
+
+func (m *VolumeCapability_AccessMode) Reset() { *m = VolumeCapability_AccessMode{} }
+func (m *VolumeCapability_AccessMode) String() string { return proto.CompactTextString(m) }
+func (*VolumeCapability_AccessMode) ProtoMessage() {}
+func (*VolumeCapability_AccessMode) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9, 2} }
+
+func (m *VolumeCapability_AccessMode) GetMode() VolumeCapability_AccessMode_Mode {
+	if m != nil {
+		return m.Mode
+	}
+	return VolumeCapability_AccessMode_UNKNOWN
+}
+
+// The capacity of the storage space in bytes. To specify an exact size,
+// `required_bytes` and `limit_bytes` can be set to the same value. At
+// least one of these fields MUST be specified.
+type CapacityRange struct {
+	// Volume must be at least this big. This field is OPTIONAL.
+	// A value of 0 is equal to an unspecified field value.
+	// The value of this field MUST NOT be negative.
+	RequiredBytes int64 `protobuf:"varint,1,opt,name=required_bytes,json=requiredBytes" json:"required_bytes,omitempty"`
+	// Volume must not be bigger than this. This field is OPTIONAL.
+	// A value of 0 is equal to an unspecified field value.
+	// The value of this field MUST NOT be negative.
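+	// Illustrative example: to request exactly 10 GiB, a CO could set
+	// both required_bytes and limit_bytes to 10737418240
+	// (10 * 1024 * 1024 * 1024).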
+ LimitBytes int64 `protobuf:"varint,2,opt,name=limit_bytes,json=limitBytes" json:"limit_bytes,omitempty"` +} + +func (m *CapacityRange) Reset() { *m = CapacityRange{} } +func (m *CapacityRange) String() string { return proto.CompactTextString(m) } +func (*CapacityRange) ProtoMessage() {} +func (*CapacityRange) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +func (m *CapacityRange) GetRequiredBytes() int64 { + if m != nil { + return m.RequiredBytes + } + return 0 +} + +func (m *CapacityRange) GetLimitBytes() int64 { + if m != nil { + return m.LimitBytes + } + return 0 +} + +// The information about a provisioned volume. +type Volume struct { + // The capacity of the volume in bytes. This field is OPTIONAL. If not + // set (value of 0), it indicates that the capacity of the volume is + // unknown (e.g., NFS share). + // The value of this field MUST NOT be negative. + CapacityBytes int64 `protobuf:"varint,1,opt,name=capacity_bytes,json=capacityBytes" json:"capacity_bytes,omitempty"` + // Contains identity information for the created volume. This field is + // REQUIRED. The identity information will be used by the CO in + // subsequent calls to refer to the provisioned volume. + Id string `protobuf:"bytes,2,opt,name=id" json:"id,omitempty"` + // Attributes reflect static properties of a volume and MUST be passed + // to volume validation and publishing calls. + // Attributes SHALL be opaque to a CO. Attributes SHALL NOT be mutable + // and SHALL be safe for the CO to cache. Attributes SHOULD NOT + // contain sensitive information. Attributes MAY NOT uniquely identify + // a volume. A volume uniquely identified by `id` SHALL always report + // the same attributes. This field is OPTIONAL and when present MUST + // be passed to volume validation and publishing calls. + Attributes map[string]string `protobuf:"bytes,3,rep,name=attributes" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *Volume) Reset() { *m = Volume{} } +func (m *Volume) String() string { return proto.CompactTextString(m) } +func (*Volume) ProtoMessage() {} +func (*Volume) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +func (m *Volume) GetCapacityBytes() int64 { + if m != nil { + return m.CapacityBytes + } + return 0 +} + +func (m *Volume) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Volume) GetAttributes() map[string]string { + if m != nil { + return m.Attributes + } + return nil +} + +type DeleteVolumeRequest struct { + // The ID of the volume to be deprovisioned. + // This field is REQUIRED. + VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` + // Secrets required by plugin to complete volume deletion request. + // A secret is a string to string map where the key identifies the + // name of the secret (e.g. "username" or "password"), and the value + // contains the secret data (e.g. "bob" or "abc123"). + // Each key MUST consist of alphanumeric characters, '-', '_' or '.'. + // Each value MUST contain a valid string. An SP MAY choose to accept + // binary (non-string) data by using a binary-to-text encoding scheme, + // like base64. + // An SP SHALL advertise the requirements for required secret keys and + // values in documentation. + // CO SHALL permit passing through the required secrets. 
+ // A CO MAY pass the same secrets to all RPCs, therefore the keys for + // all unique secrets that an SP expects must be unique across all CSI + // operations. + // This information is sensitive and MUST be treated as such (not + // logged, etc.) by the CO. + // This field is OPTIONAL. + ControllerDeleteSecrets map[string]string `protobuf:"bytes,2,rep,name=controller_delete_secrets,json=controllerDeleteSecrets" json:"controller_delete_secrets,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *DeleteVolumeRequest) Reset() { *m = DeleteVolumeRequest{} } +func (m *DeleteVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteVolumeRequest) ProtoMessage() {} +func (*DeleteVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +func (m *DeleteVolumeRequest) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *DeleteVolumeRequest) GetControllerDeleteSecrets() map[string]string { + if m != nil { + return m.ControllerDeleteSecrets + } + return nil +} + +type DeleteVolumeResponse struct { +} + +func (m *DeleteVolumeResponse) Reset() { *m = DeleteVolumeResponse{} } +func (m *DeleteVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*DeleteVolumeResponse) ProtoMessage() {} +func (*DeleteVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } + +type ControllerPublishVolumeRequest struct { + // The ID of the volume to be used on a node. + // This field is REQUIRED. + VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` + // The ID of the node. This field is REQUIRED. The CO SHALL set this + // field to match the node ID returned by `NodeGetId`. + NodeId string `protobuf:"bytes,2,opt,name=node_id,json=nodeId" json:"node_id,omitempty"` + // The capability of the volume the CO expects the volume to have. + // This is a REQUIRED field. + VolumeCapability *VolumeCapability `protobuf:"bytes,3,opt,name=volume_capability,json=volumeCapability" json:"volume_capability,omitempty"` + // Whether to publish the volume in readonly mode. This field is + // REQUIRED. + Readonly bool `protobuf:"varint,4,opt,name=readonly" json:"readonly,omitempty"` + // Secrets required by plugin to complete controller publish volume + // request. + // A secret is a string to string map where the key identifies the + // name of the secret (e.g. "username" or "password"), and the value + // contains the secret data (e.g. "bob" or "abc123"). + // Each key MUST consist of alphanumeric characters, '-', '_' or '.'. + // Each value MUST contain a valid string. An SP MAY choose to accept + // binary (non-string) data by using a binary-to-text encoding scheme, + // like base64. + // An SP SHALL advertise the requirements for required secret keys and + // values in documentation. + // CO SHALL permit passing through the required secrets. + // A CO MAY pass the same secrets to all RPCs, therefore the keys for + // all unique secrets that an SP expects must be unique across all CSI + // operations. + // This information is sensitive and MUST be treated as such (not + // logged, etc.) by the CO. + // This field is OPTIONAL. 
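+	// Illustrative (hypothetical) value following the key/value rules
+	// above: map[string]string{"username": "bob", "password": "abc123"}.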
+ ControllerPublishSecrets map[string]string `protobuf:"bytes,5,rep,name=controller_publish_secrets,json=controllerPublishSecrets" json:"controller_publish_secrets,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Attributes of the volume to be used on a node. This field is + // OPTIONAL and MUST match the attributes of the Volume identified + // by `volume_id`. + VolumeAttributes map[string]string `protobuf:"bytes,6,rep,name=volume_attributes,json=volumeAttributes" json:"volume_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *ControllerPublishVolumeRequest) Reset() { *m = ControllerPublishVolumeRequest{} } +func (m *ControllerPublishVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*ControllerPublishVolumeRequest) ProtoMessage() {} +func (*ControllerPublishVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } + +func (m *ControllerPublishVolumeRequest) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *ControllerPublishVolumeRequest) GetNodeId() string { + if m != nil { + return m.NodeId + } + return "" +} + +func (m *ControllerPublishVolumeRequest) GetVolumeCapability() *VolumeCapability { + if m != nil { + return m.VolumeCapability + } + return nil +} + +func (m *ControllerPublishVolumeRequest) GetReadonly() bool { + if m != nil { + return m.Readonly + } + return false +} + +func (m *ControllerPublishVolumeRequest) GetControllerPublishSecrets() map[string]string { + if m != nil { + return m.ControllerPublishSecrets + } + return nil +} + +func (m *ControllerPublishVolumeRequest) GetVolumeAttributes() map[string]string { + if m != nil { + return m.VolumeAttributes + } + return nil +} + +type ControllerPublishVolumeResponse struct { + // The SP specific information that will be passed to the Plugin in + // the subsequent `NodeStageVolume` or `NodePublishVolume` calls + // for the given volume. + // This information is opaque to the CO. This field is OPTIONAL. + PublishInfo map[string]string `protobuf:"bytes,1,rep,name=publish_info,json=publishInfo" json:"publish_info,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *ControllerPublishVolumeResponse) Reset() { *m = ControllerPublishVolumeResponse{} } +func (m *ControllerPublishVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*ControllerPublishVolumeResponse) ProtoMessage() {} +func (*ControllerPublishVolumeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{15} +} + +func (m *ControllerPublishVolumeResponse) GetPublishInfo() map[string]string { + if m != nil { + return m.PublishInfo + } + return nil +} + +type ControllerUnpublishVolumeRequest struct { + // The ID of the volume. This field is REQUIRED. + VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` + // The ID of the node. This field is OPTIONAL. The CO SHOULD set this + // field to match the node ID returned by `NodeGetId` or leave it + // unset. If the value is set, the SP MUST unpublish the volume from + // the specified node. If the value is unset, the SP MUST unpublish + // the volume from all nodes it is published to. + NodeId string `protobuf:"bytes,2,opt,name=node_id,json=nodeId" json:"node_id,omitempty"` + // Secrets required by plugin to complete controller unpublish volume + // request. 
This SHOULD be the same secrets passed to the
+	// ControllerPublishVolume call for the specified volume.
+	// A secret is a string to string map where the key identifies the
+	// name of the secret (e.g. "username" or "password"), and the value
+	// contains the secret data (e.g. "bob" or "abc123").
+	// Each key MUST consist of alphanumeric characters, '-', '_' or '.'.
+	// Each value MUST contain a valid string. An SP MAY choose to accept
+	// binary (non-string) data by using a binary-to-text encoding scheme,
+	// like base64.
+	// An SP SHALL advertise the requirements for required secret keys and
+	// values in documentation.
+	// CO SHALL permit passing through the required secrets.
+	// A CO MAY pass the same secrets to all RPCs, therefore the keys for
+	// all unique secrets that an SP expects must be unique across all CSI
+	// operations.
+	// This information is sensitive and MUST be treated as such (not
+	// logged, etc.) by the CO.
+	// This field is OPTIONAL.
+	ControllerUnpublishSecrets map[string]string `protobuf:"bytes,3,rep,name=controller_unpublish_secrets,json=controllerUnpublishSecrets" json:"controller_unpublish_secrets,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+}
+
+func (m *ControllerUnpublishVolumeRequest) Reset() { *m = ControllerUnpublishVolumeRequest{} }
+func (m *ControllerUnpublishVolumeRequest) String() string { return proto.CompactTextString(m) }
+func (*ControllerUnpublishVolumeRequest) ProtoMessage() {}
+func (*ControllerUnpublishVolumeRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor0, []int{16}
+}
+
+func (m *ControllerUnpublishVolumeRequest) GetVolumeId() string {
+	if m != nil {
+		return m.VolumeId
+	}
+	return ""
+}
+
+func (m *ControllerUnpublishVolumeRequest) GetNodeId() string {
+	if m != nil {
+		return m.NodeId
+	}
+	return ""
+}
+
+func (m *ControllerUnpublishVolumeRequest) GetControllerUnpublishSecrets() map[string]string {
+	if m != nil {
+		return m.ControllerUnpublishSecrets
+	}
+	return nil
+}
+
+type ControllerUnpublishVolumeResponse struct {
+}
+
+func (m *ControllerUnpublishVolumeResponse) Reset() { *m = ControllerUnpublishVolumeResponse{} }
+func (m *ControllerUnpublishVolumeResponse) String() string { return proto.CompactTextString(m) }
+func (*ControllerUnpublishVolumeResponse) ProtoMessage() {}
+func (*ControllerUnpublishVolumeResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor0, []int{17}
+}
+
+type ValidateVolumeCapabilitiesRequest struct {
+	// The ID of the volume to check. This field is REQUIRED.
+	VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
+	// The capabilities that the CO wants to check for the volume. This
+	// call SHALL return "supported" only if all the volume capabilities
+	// specified below are supported. This field is REQUIRED.
+	VolumeCapabilities []*VolumeCapability `protobuf:"bytes,2,rep,name=volume_capabilities,json=volumeCapabilities" json:"volume_capabilities,omitempty"`
+	// Attributes of the volume to check. This field is OPTIONAL and MUST
+	// match the attributes of the Volume identified by `volume_id`.
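+	// In the typical flow these are the opaque Attributes the SP
+	// returned on the Volume in CreateVolumeResponse.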
+ VolumeAttributes map[string]string `protobuf:"bytes,3,rep,name=volume_attributes,json=volumeAttributes" json:"volume_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *ValidateVolumeCapabilitiesRequest) Reset() { *m = ValidateVolumeCapabilitiesRequest{} } +func (m *ValidateVolumeCapabilitiesRequest) String() string { return proto.CompactTextString(m) } +func (*ValidateVolumeCapabilitiesRequest) ProtoMessage() {} +func (*ValidateVolumeCapabilitiesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{18} +} + +func (m *ValidateVolumeCapabilitiesRequest) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *ValidateVolumeCapabilitiesRequest) GetVolumeCapabilities() []*VolumeCapability { + if m != nil { + return m.VolumeCapabilities + } + return nil +} + +func (m *ValidateVolumeCapabilitiesRequest) GetVolumeAttributes() map[string]string { + if m != nil { + return m.VolumeAttributes + } + return nil +} + +type ValidateVolumeCapabilitiesResponse struct { + // True if the Plugin supports the specified capabilities for the + // given volume. This field is REQUIRED. + Supported bool `protobuf:"varint,1,opt,name=supported" json:"supported,omitempty"` + // Message to the CO if `supported` above is false. This field is + // OPTIONAL. + // An empty string is equal to an unspecified field value. + Message string `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"` +} + +func (m *ValidateVolumeCapabilitiesResponse) Reset() { *m = ValidateVolumeCapabilitiesResponse{} } +func (m *ValidateVolumeCapabilitiesResponse) String() string { return proto.CompactTextString(m) } +func (*ValidateVolumeCapabilitiesResponse) ProtoMessage() {} +func (*ValidateVolumeCapabilitiesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{19} +} + +func (m *ValidateVolumeCapabilitiesResponse) GetSupported() bool { + if m != nil { + return m.Supported + } + return false +} + +func (m *ValidateVolumeCapabilitiesResponse) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +type ListVolumesRequest struct { + // If specified (non-zero value), the Plugin MUST NOT return more + // entries than this number in the response. If the actual number of + // entries is more than this number, the Plugin MUST set `next_token` + // in the response which can be used to get the next page of entries + // in the subsequent `ListVolumes` call. This field is OPTIONAL. If + // not specified (zero value), it means there is no restriction on the + // number of entries that can be returned. + // The value of this field MUST NOT be negative. + MaxEntries int32 `protobuf:"varint,1,opt,name=max_entries,json=maxEntries" json:"max_entries,omitempty"` + // A token to specify where to start paginating. Set this field to + // `next_token` returned by a previous `ListVolumes` call to get the + // next page of entries. This field is OPTIONAL. + // An empty string is equal to an unspecified field value. 
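+	// Pagination sketch (illustrative): issue the first ListVolumes call
+	// with starting_token unset; while the response carries a non-empty
+	// next_token, repeat the call with starting_token set to that token.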
+ StartingToken string `protobuf:"bytes,2,opt,name=starting_token,json=startingToken" json:"starting_token,omitempty"` +} + +func (m *ListVolumesRequest) Reset() { *m = ListVolumesRequest{} } +func (m *ListVolumesRequest) String() string { return proto.CompactTextString(m) } +func (*ListVolumesRequest) ProtoMessage() {} +func (*ListVolumesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } + +func (m *ListVolumesRequest) GetMaxEntries() int32 { + if m != nil { + return m.MaxEntries + } + return 0 +} + +func (m *ListVolumesRequest) GetStartingToken() string { + if m != nil { + return m.StartingToken + } + return "" +} + +type ListVolumesResponse struct { + Entries []*ListVolumesResponse_Entry `protobuf:"bytes,1,rep,name=entries" json:"entries,omitempty"` + // This token allows you to get the next page of entries for + // `ListVolumes` request. If the number of entries is larger than + // `max_entries`, use the `next_token` as a value for the + // `starting_token` field in the next `ListVolumes` request. This + // field is OPTIONAL. + // An empty string is equal to an unspecified field value. + NextToken string `protobuf:"bytes,2,opt,name=next_token,json=nextToken" json:"next_token,omitempty"` +} + +func (m *ListVolumesResponse) Reset() { *m = ListVolumesResponse{} } +func (m *ListVolumesResponse) String() string { return proto.CompactTextString(m) } +func (*ListVolumesResponse) ProtoMessage() {} +func (*ListVolumesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } + +func (m *ListVolumesResponse) GetEntries() []*ListVolumesResponse_Entry { + if m != nil { + return m.Entries + } + return nil +} + +func (m *ListVolumesResponse) GetNextToken() string { + if m != nil { + return m.NextToken + } + return "" +} + +type ListVolumesResponse_Entry struct { + Volume *Volume `protobuf:"bytes,1,opt,name=volume" json:"volume,omitempty"` +} + +func (m *ListVolumesResponse_Entry) Reset() { *m = ListVolumesResponse_Entry{} } +func (m *ListVolumesResponse_Entry) String() string { return proto.CompactTextString(m) } +func (*ListVolumesResponse_Entry) ProtoMessage() {} +func (*ListVolumesResponse_Entry) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21, 0} } + +func (m *ListVolumesResponse_Entry) GetVolume() *Volume { + if m != nil { + return m.Volume + } + return nil +} + +type GetCapacityRequest struct { + // If specified, the Plugin SHALL report the capacity of the storage + // that can be used to provision volumes that satisfy ALL of the + // specified `volume_capabilities`. These are the same + // `volume_capabilities` the CO will use in `CreateVolumeRequest`. + // This field is OPTIONAL. + VolumeCapabilities []*VolumeCapability `protobuf:"bytes,1,rep,name=volume_capabilities,json=volumeCapabilities" json:"volume_capabilities,omitempty"` + // If specified, the Plugin SHALL report the capacity of the storage + // that can be used to provision volumes with the given Plugin + // specific `parameters`. These are the same `parameters` the CO will + // use in `CreateVolumeRequest`. This field is OPTIONAL. 
+ Parameters map[string]string `protobuf:"bytes,2,rep,name=parameters" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *GetCapacityRequest) Reset() { *m = GetCapacityRequest{} } +func (m *GetCapacityRequest) String() string { return proto.CompactTextString(m) } +func (*GetCapacityRequest) ProtoMessage() {} +func (*GetCapacityRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } + +func (m *GetCapacityRequest) GetVolumeCapabilities() []*VolumeCapability { + if m != nil { + return m.VolumeCapabilities + } + return nil +} + +func (m *GetCapacityRequest) GetParameters() map[string]string { + if m != nil { + return m.Parameters + } + return nil +} + +type GetCapacityResponse struct { + // The available capacity of the storage that can be used to + // provision volumes. If `volume_capabilities` or `parameters` is + // specified in the request, the Plugin SHALL take those into + // consideration when calculating the available capacity of the + // storage. This field is REQUIRED. + // The value of this field MUST NOT be negative. + AvailableCapacity int64 `protobuf:"varint,1,opt,name=available_capacity,json=availableCapacity" json:"available_capacity,omitempty"` +} + +func (m *GetCapacityResponse) Reset() { *m = GetCapacityResponse{} } +func (m *GetCapacityResponse) String() string { return proto.CompactTextString(m) } +func (*GetCapacityResponse) ProtoMessage() {} +func (*GetCapacityResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } + +func (m *GetCapacityResponse) GetAvailableCapacity() int64 { + if m != nil { + return m.AvailableCapacity + } + return 0 +} + +type ControllerGetCapabilitiesRequest struct { +} + +func (m *ControllerGetCapabilitiesRequest) Reset() { *m = ControllerGetCapabilitiesRequest{} } +func (m *ControllerGetCapabilitiesRequest) String() string { return proto.CompactTextString(m) } +func (*ControllerGetCapabilitiesRequest) ProtoMessage() {} +func (*ControllerGetCapabilitiesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{24} +} + +type ControllerGetCapabilitiesResponse struct { + // All the capabilities that the controller service supports. This + // field is OPTIONAL. + Capabilities []*ControllerServiceCapability `protobuf:"bytes,2,rep,name=capabilities" json:"capabilities,omitempty"` +} + +func (m *ControllerGetCapabilitiesResponse) Reset() { *m = ControllerGetCapabilitiesResponse{} } +func (m *ControllerGetCapabilitiesResponse) String() string { return proto.CompactTextString(m) } +func (*ControllerGetCapabilitiesResponse) ProtoMessage() {} +func (*ControllerGetCapabilitiesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{25} +} + +func (m *ControllerGetCapabilitiesResponse) GetCapabilities() []*ControllerServiceCapability { + if m != nil { + return m.Capabilities + } + return nil +} + +// Specifies a capability of the controller service. 
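+// Illustrative check (a sketch, assuming resp is a
+// *ControllerGetCapabilitiesResponse):
+//
+//	for _, c := range resp.GetCapabilities() {
+//		if c.GetRpc().GetType() == ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME {
+//			// the controller supports CreateVolume and DeleteVolume
+//		}
+//	}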
+type ControllerServiceCapability struct { + // Types that are valid to be assigned to Type: + // *ControllerServiceCapability_Rpc + Type isControllerServiceCapability_Type `protobuf_oneof:"type"` +} + +func (m *ControllerServiceCapability) Reset() { *m = ControllerServiceCapability{} } +func (m *ControllerServiceCapability) String() string { return proto.CompactTextString(m) } +func (*ControllerServiceCapability) ProtoMessage() {} +func (*ControllerServiceCapability) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} } + +type isControllerServiceCapability_Type interface { + isControllerServiceCapability_Type() +} + +type ControllerServiceCapability_Rpc struct { + Rpc *ControllerServiceCapability_RPC `protobuf:"bytes,1,opt,name=rpc,oneof"` +} + +func (*ControllerServiceCapability_Rpc) isControllerServiceCapability_Type() {} + +func (m *ControllerServiceCapability) GetType() isControllerServiceCapability_Type { + if m != nil { + return m.Type + } + return nil +} + +func (m *ControllerServiceCapability) GetRpc() *ControllerServiceCapability_RPC { + if x, ok := m.GetType().(*ControllerServiceCapability_Rpc); ok { + return x.Rpc + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*ControllerServiceCapability) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ControllerServiceCapability_OneofMarshaler, _ControllerServiceCapability_OneofUnmarshaler, _ControllerServiceCapability_OneofSizer, []interface{}{ + (*ControllerServiceCapability_Rpc)(nil), + } +} + +func _ControllerServiceCapability_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ControllerServiceCapability) + // type + switch x := m.Type.(type) { + case *ControllerServiceCapability_Rpc: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Rpc); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ControllerServiceCapability.Type has unexpected type %T", x) + } + return nil +} + +func _ControllerServiceCapability_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ControllerServiceCapability) + switch tag { + case 1: // type.rpc + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ControllerServiceCapability_RPC) + err := b.DecodeMessage(msg) + m.Type = &ControllerServiceCapability_Rpc{msg} + return true, err + default: + return false, nil + } +} + +func _ControllerServiceCapability_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ControllerServiceCapability) + // type + switch x := m.Type.(type) { + case *ControllerServiceCapability_Rpc: + s := proto.Size(x.Rpc) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type ControllerServiceCapability_RPC struct { + Type ControllerServiceCapability_RPC_Type `protobuf:"varint,1,opt,name=type,enum=csi.v0.ControllerServiceCapability_RPC_Type" json:"type,omitempty"` +} + +func (m *ControllerServiceCapability_RPC) Reset() { *m = ControllerServiceCapability_RPC{} } +func (m *ControllerServiceCapability_RPC) String() string { return proto.CompactTextString(m) } +func (*ControllerServiceCapability_RPC) ProtoMessage() {} +func (*ControllerServiceCapability_RPC) Descriptor() ([]byte, []int) { + return 
fileDescriptor0, []int{26, 0}
+}
+
+func (m *ControllerServiceCapability_RPC) GetType() ControllerServiceCapability_RPC_Type {
+	if m != nil {
+		return m.Type
+	}
+	return ControllerServiceCapability_RPC_UNKNOWN
+}
+
+type NodeStageVolumeRequest struct {
+	// The ID of the volume to publish. This field is REQUIRED.
+	VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"`
+	// The CO SHALL set this field to the value returned by
+	// `ControllerPublishVolume` if the corresponding Controller Plugin
+	// has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be
+	// left unset if the corresponding Controller Plugin does not have
+	// this capability. This is an OPTIONAL field.
+	PublishInfo map[string]string `protobuf:"bytes,2,rep,name=publish_info,json=publishInfo" json:"publish_info,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	// The path to which the volume will be published. It MUST be an
+	// absolute path in the root filesystem of the process serving this
+	// request. The CO SHALL ensure that there is only one
+	// staging_target_path per volume.
+	// This is a REQUIRED field.
+	StagingTargetPath string `protobuf:"bytes,3,opt,name=staging_target_path,json=stagingTargetPath" json:"staging_target_path,omitempty"`
+	// The capability of the volume the CO expects the volume to have.
+	// This is a REQUIRED field.
+	VolumeCapability *VolumeCapability `protobuf:"bytes,4,opt,name=volume_capability,json=volumeCapability" json:"volume_capability,omitempty"`
+	// Secrets required by plugin to complete node stage volume request.
+	// A secret is a string to string map where the key identifies the
+	// name of the secret (e.g. "username" or "password"), and the value
+	// contains the secret data (e.g. "bob" or "abc123").
+	// Each key MUST consist of alphanumeric characters, '-', '_' or '.'.
+	// Each value MUST contain a valid string. An SP MAY choose to accept
+	// binary (non-string) data by using a binary-to-text encoding scheme,
+	// like base64.
+	// An SP SHALL advertise the requirements for required secret keys and
+	// values in documentation.
+	// CO SHALL permit passing through the required secrets.
+	// A CO MAY pass the same secrets to all RPCs, therefore the keys for
+	// all unique secrets that an SP expects must be unique across all CSI
+	// operations.
+	// This information is sensitive and MUST be treated as such (not
+	// logged, etc.) by the CO.
+	// This field is OPTIONAL.
+	NodeStageSecrets map[string]string `protobuf:"bytes,5,rep,name=node_stage_secrets,json=nodeStageSecrets" json:"node_stage_secrets,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	// Attributes of the volume to publish. This field is OPTIONAL and
+	// MUST match the attributes of the Volume identified by
+	// `volume_id`.
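+	// In the typical flow, NodeStageVolume is called after
+	// ControllerPublishVolume (when the controller has the
+	// PUBLISH_UNPUBLISH_VOLUME capability) and before NodePublishVolume.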
+ VolumeAttributes map[string]string `protobuf:"bytes,6,rep,name=volume_attributes,json=volumeAttributes" json:"volume_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *NodeStageVolumeRequest) Reset() { *m = NodeStageVolumeRequest{} } +func (m *NodeStageVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*NodeStageVolumeRequest) ProtoMessage() {} +func (*NodeStageVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } + +func (m *NodeStageVolumeRequest) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *NodeStageVolumeRequest) GetPublishInfo() map[string]string { + if m != nil { + return m.PublishInfo + } + return nil +} + +func (m *NodeStageVolumeRequest) GetStagingTargetPath() string { + if m != nil { + return m.StagingTargetPath + } + return "" +} + +func (m *NodeStageVolumeRequest) GetVolumeCapability() *VolumeCapability { + if m != nil { + return m.VolumeCapability + } + return nil +} + +func (m *NodeStageVolumeRequest) GetNodeStageSecrets() map[string]string { + if m != nil { + return m.NodeStageSecrets + } + return nil +} + +func (m *NodeStageVolumeRequest) GetVolumeAttributes() map[string]string { + if m != nil { + return m.VolumeAttributes + } + return nil +} + +type NodeStageVolumeResponse struct { +} + +func (m *NodeStageVolumeResponse) Reset() { *m = NodeStageVolumeResponse{} } +func (m *NodeStageVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*NodeStageVolumeResponse) ProtoMessage() {} +func (*NodeStageVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } + +type NodeUnstageVolumeRequest struct { + // The ID of the volume. This field is REQUIRED. + VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` + // The path at which the volume was published. It MUST be an absolute + // path in the root filesystem of the process serving this request. + // This is a REQUIRED field. + StagingTargetPath string `protobuf:"bytes,2,opt,name=staging_target_path,json=stagingTargetPath" json:"staging_target_path,omitempty"` +} + +func (m *NodeUnstageVolumeRequest) Reset() { *m = NodeUnstageVolumeRequest{} } +func (m *NodeUnstageVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*NodeUnstageVolumeRequest) ProtoMessage() {} +func (*NodeUnstageVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } + +func (m *NodeUnstageVolumeRequest) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *NodeUnstageVolumeRequest) GetStagingTargetPath() string { + if m != nil { + return m.StagingTargetPath + } + return "" +} + +type NodeUnstageVolumeResponse struct { +} + +func (m *NodeUnstageVolumeResponse) Reset() { *m = NodeUnstageVolumeResponse{} } +func (m *NodeUnstageVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*NodeUnstageVolumeResponse) ProtoMessage() {} +func (*NodeUnstageVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} } + +type NodePublishVolumeRequest struct { + // The ID of the volume to publish. This field is REQUIRED. 
+ VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` + // The CO SHALL set this field to the value returned by + // `ControllerPublishVolume` if the corresponding Controller Plugin + // has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be + // left unset if the corresponding Controller Plugin does not have + // this capability. This is an OPTIONAL field. + PublishInfo map[string]string `protobuf:"bytes,2,rep,name=publish_info,json=publishInfo" json:"publish_info,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // The path to which the device was mounted by `NodeStageVolume`. + // It MUST be an absolute path in the root filesystem of the process + // serving this request. + // It MUST be set if the Node Plugin implements the + // `STAGE_UNSTAGE_VOLUME` node capability. + // This is an OPTIONAL field. + StagingTargetPath string `protobuf:"bytes,3,opt,name=staging_target_path,json=stagingTargetPath" json:"staging_target_path,omitempty"` + // The path to which the volume will be published. It MUST be an + // absolute path in the root filesystem of the process serving this + // request. The CO SHALL ensure uniqueness of target_path per volume. + // The CO SHALL ensure that the path exists, and that the process + // serving the request has `read` and `write` permissions to the path. + // This is a REQUIRED field. + TargetPath string `protobuf:"bytes,4,opt,name=target_path,json=targetPath" json:"target_path,omitempty"` + // The capability of the volume the CO expects the volume to have. + // This is a REQUIRED field. + VolumeCapability *VolumeCapability `protobuf:"bytes,5,opt,name=volume_capability,json=volumeCapability" json:"volume_capability,omitempty"` + // Whether to publish the volume in readonly mode. This field is + // REQUIRED. + Readonly bool `protobuf:"varint,6,opt,name=readonly" json:"readonly,omitempty"` + // Secrets required by plugin to complete node publish volume request. + // A secret is a string to string map where the key identifies the + // name of the secret (e.g. "username" or "password"), and the value + // contains the secret data (e.g. "bob" or "abc123"). + // Each key MUST consist of alphanumeric characters, '-', '_' or '.'. + // Each value MUST contain a valid string. An SP MAY choose to accept + // binary (non-string) data by using a binary-to-text encoding scheme, + // like base64. + // An SP SHALL advertise the requirements for required secret keys and + // values in documentation. + // CO SHALL permit passing through the required secrets. + // A CO MAY pass the same secrets to all RPCs, therefore the keys for + // all unique secrets that an SP expects must be unique across all CSI + // operations. + // This information is sensitive and MUST be treated as such (not + // logged, etc.) by the CO. + // This field is OPTIONAL. + NodePublishSecrets map[string]string `protobuf:"bytes,7,rep,name=node_publish_secrets,json=nodePublishSecrets" json:"node_publish_secrets,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Attributes of the volume to publish. This field is OPTIONAL and + // MUST match the attributes of the Volume identified by + // `volume_id`. 
+ VolumeAttributes map[string]string `protobuf:"bytes,8,rep,name=volume_attributes,json=volumeAttributes" json:"volume_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *NodePublishVolumeRequest) Reset() { *m = NodePublishVolumeRequest{} } +func (m *NodePublishVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*NodePublishVolumeRequest) ProtoMessage() {} +func (*NodePublishVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} } + +func (m *NodePublishVolumeRequest) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *NodePublishVolumeRequest) GetPublishInfo() map[string]string { + if m != nil { + return m.PublishInfo + } + return nil +} + +func (m *NodePublishVolumeRequest) GetStagingTargetPath() string { + if m != nil { + return m.StagingTargetPath + } + return "" +} + +func (m *NodePublishVolumeRequest) GetTargetPath() string { + if m != nil { + return m.TargetPath + } + return "" +} + +func (m *NodePublishVolumeRequest) GetVolumeCapability() *VolumeCapability { + if m != nil { + return m.VolumeCapability + } + return nil +} + +func (m *NodePublishVolumeRequest) GetReadonly() bool { + if m != nil { + return m.Readonly + } + return false +} + +func (m *NodePublishVolumeRequest) GetNodePublishSecrets() map[string]string { + if m != nil { + return m.NodePublishSecrets + } + return nil +} + +func (m *NodePublishVolumeRequest) GetVolumeAttributes() map[string]string { + if m != nil { + return m.VolumeAttributes + } + return nil +} + +type NodePublishVolumeResponse struct { +} + +func (m *NodePublishVolumeResponse) Reset() { *m = NodePublishVolumeResponse{} } +func (m *NodePublishVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*NodePublishVolumeResponse) ProtoMessage() {} +func (*NodePublishVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} } + +type NodeUnpublishVolumeRequest struct { + // The ID of the volume. This field is REQUIRED. + VolumeId string `protobuf:"bytes,1,opt,name=volume_id,json=volumeId" json:"volume_id,omitempty"` + // The path at which the volume was published. It MUST be an absolute + // path in the root filesystem of the process serving this request. + // This is a REQUIRED field. 
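+	// That is, the same target_path previously passed to
+	// NodePublishVolume for this volume.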
+ TargetPath string `protobuf:"bytes,2,opt,name=target_path,json=targetPath" json:"target_path,omitempty"` +} + +func (m *NodeUnpublishVolumeRequest) Reset() { *m = NodeUnpublishVolumeRequest{} } +func (m *NodeUnpublishVolumeRequest) String() string { return proto.CompactTextString(m) } +func (*NodeUnpublishVolumeRequest) ProtoMessage() {} +func (*NodeUnpublishVolumeRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} } + +func (m *NodeUnpublishVolumeRequest) GetVolumeId() string { + if m != nil { + return m.VolumeId + } + return "" +} + +func (m *NodeUnpublishVolumeRequest) GetTargetPath() string { + if m != nil { + return m.TargetPath + } + return "" +} + +type NodeUnpublishVolumeResponse struct { +} + +func (m *NodeUnpublishVolumeResponse) Reset() { *m = NodeUnpublishVolumeResponse{} } +func (m *NodeUnpublishVolumeResponse) String() string { return proto.CompactTextString(m) } +func (*NodeUnpublishVolumeResponse) ProtoMessage() {} +func (*NodeUnpublishVolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} } + +type NodeGetIdRequest struct { +} + +func (m *NodeGetIdRequest) Reset() { *m = NodeGetIdRequest{} } +func (m *NodeGetIdRequest) String() string { return proto.CompactTextString(m) } +func (*NodeGetIdRequest) ProtoMessage() {} +func (*NodeGetIdRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35} } + +type NodeGetIdResponse struct { + // The ID of the node as understood by the SP which SHALL be used by + // CO in subsequent `ControllerPublishVolume`. + // This is a REQUIRED field. + NodeId string `protobuf:"bytes,1,opt,name=node_id,json=nodeId" json:"node_id,omitempty"` +} + +func (m *NodeGetIdResponse) Reset() { *m = NodeGetIdResponse{} } +func (m *NodeGetIdResponse) String() string { return proto.CompactTextString(m) } +func (*NodeGetIdResponse) ProtoMessage() {} +func (*NodeGetIdResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{36} } + +func (m *NodeGetIdResponse) GetNodeId() string { + if m != nil { + return m.NodeId + } + return "" +} + +type NodeGetCapabilitiesRequest struct { +} + +func (m *NodeGetCapabilitiesRequest) Reset() { *m = NodeGetCapabilitiesRequest{} } +func (m *NodeGetCapabilitiesRequest) String() string { return proto.CompactTextString(m) } +func (*NodeGetCapabilitiesRequest) ProtoMessage() {} +func (*NodeGetCapabilitiesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37} } + +type NodeGetCapabilitiesResponse struct { + // All the capabilities that the node service supports. This field + // is OPTIONAL. + Capabilities []*NodeServiceCapability `protobuf:"bytes,1,rep,name=capabilities" json:"capabilities,omitempty"` +} + +func (m *NodeGetCapabilitiesResponse) Reset() { *m = NodeGetCapabilitiesResponse{} } +func (m *NodeGetCapabilitiesResponse) String() string { return proto.CompactTextString(m) } +func (*NodeGetCapabilitiesResponse) ProtoMessage() {} +func (*NodeGetCapabilitiesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{38} } + +func (m *NodeGetCapabilitiesResponse) GetCapabilities() []*NodeServiceCapability { + if m != nil { + return m.Capabilities + } + return nil +} + +// Specifies a capability of the node service. 
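+// Illustrative check (a sketch, assuming resp is a
+// *NodeGetCapabilitiesResponse):
+//
+//	for _, c := range resp.GetCapabilities() {
+//		if c.GetRpc().GetType() == NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME {
+//			// the node plugin expects NodeStageVolume/NodeUnstageVolume calls
+//		}
+//	}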
+type NodeServiceCapability struct { + // Types that are valid to be assigned to Type: + // *NodeServiceCapability_Rpc + Type isNodeServiceCapability_Type `protobuf_oneof:"type"` +} + +func (m *NodeServiceCapability) Reset() { *m = NodeServiceCapability{} } +func (m *NodeServiceCapability) String() string { return proto.CompactTextString(m) } +func (*NodeServiceCapability) ProtoMessage() {} +func (*NodeServiceCapability) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{39} } + +type isNodeServiceCapability_Type interface { + isNodeServiceCapability_Type() +} + +type NodeServiceCapability_Rpc struct { + Rpc *NodeServiceCapability_RPC `protobuf:"bytes,1,opt,name=rpc,oneof"` +} + +func (*NodeServiceCapability_Rpc) isNodeServiceCapability_Type() {} + +func (m *NodeServiceCapability) GetType() isNodeServiceCapability_Type { + if m != nil { + return m.Type + } + return nil +} + +func (m *NodeServiceCapability) GetRpc() *NodeServiceCapability_RPC { + if x, ok := m.GetType().(*NodeServiceCapability_Rpc); ok { + return x.Rpc + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*NodeServiceCapability) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _NodeServiceCapability_OneofMarshaler, _NodeServiceCapability_OneofUnmarshaler, _NodeServiceCapability_OneofSizer, []interface{}{ + (*NodeServiceCapability_Rpc)(nil), + } +} + +func _NodeServiceCapability_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*NodeServiceCapability) + // type + switch x := m.Type.(type) { + case *NodeServiceCapability_Rpc: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Rpc); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("NodeServiceCapability.Type has unexpected type %T", x) + } + return nil +} + +func _NodeServiceCapability_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*NodeServiceCapability) + switch tag { + case 1: // type.rpc + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(NodeServiceCapability_RPC) + err := b.DecodeMessage(msg) + m.Type = &NodeServiceCapability_Rpc{msg} + return true, err + default: + return false, nil + } +} + +func _NodeServiceCapability_OneofSizer(msg proto.Message) (n int) { + m := msg.(*NodeServiceCapability) + // type + switch x := m.Type.(type) { + case *NodeServiceCapability_Rpc: + s := proto.Size(x.Rpc) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type NodeServiceCapability_RPC struct { + Type NodeServiceCapability_RPC_Type `protobuf:"varint,1,opt,name=type,enum=csi.v0.NodeServiceCapability_RPC_Type" json:"type,omitempty"` +} + +func (m *NodeServiceCapability_RPC) Reset() { *m = NodeServiceCapability_RPC{} } +func (m *NodeServiceCapability_RPC) String() string { return proto.CompactTextString(m) } +func (*NodeServiceCapability_RPC) ProtoMessage() {} +func (*NodeServiceCapability_RPC) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{39, 0} } + +func (m *NodeServiceCapability_RPC) GetType() NodeServiceCapability_RPC_Type { + if m != nil { + return m.Type + } + return NodeServiceCapability_RPC_UNKNOWN +} + +func init() { + proto.RegisterType((*GetPluginInfoRequest)(nil), 
"csi.v0.GetPluginInfoRequest") + proto.RegisterType((*GetPluginInfoResponse)(nil), "csi.v0.GetPluginInfoResponse") + proto.RegisterType((*GetPluginCapabilitiesRequest)(nil), "csi.v0.GetPluginCapabilitiesRequest") + proto.RegisterType((*GetPluginCapabilitiesResponse)(nil), "csi.v0.GetPluginCapabilitiesResponse") + proto.RegisterType((*PluginCapability)(nil), "csi.v0.PluginCapability") + proto.RegisterType((*PluginCapability_Service)(nil), "csi.v0.PluginCapability.Service") + proto.RegisterType((*ProbeRequest)(nil), "csi.v0.ProbeRequest") + proto.RegisterType((*ProbeResponse)(nil), "csi.v0.ProbeResponse") + proto.RegisterType((*CreateVolumeRequest)(nil), "csi.v0.CreateVolumeRequest") + proto.RegisterType((*CreateVolumeResponse)(nil), "csi.v0.CreateVolumeResponse") + proto.RegisterType((*VolumeCapability)(nil), "csi.v0.VolumeCapability") + proto.RegisterType((*VolumeCapability_BlockVolume)(nil), "csi.v0.VolumeCapability.BlockVolume") + proto.RegisterType((*VolumeCapability_MountVolume)(nil), "csi.v0.VolumeCapability.MountVolume") + proto.RegisterType((*VolumeCapability_AccessMode)(nil), "csi.v0.VolumeCapability.AccessMode") + proto.RegisterType((*CapacityRange)(nil), "csi.v0.CapacityRange") + proto.RegisterType((*Volume)(nil), "csi.v0.Volume") + proto.RegisterType((*DeleteVolumeRequest)(nil), "csi.v0.DeleteVolumeRequest") + proto.RegisterType((*DeleteVolumeResponse)(nil), "csi.v0.DeleteVolumeResponse") + proto.RegisterType((*ControllerPublishVolumeRequest)(nil), "csi.v0.ControllerPublishVolumeRequest") + proto.RegisterType((*ControllerPublishVolumeResponse)(nil), "csi.v0.ControllerPublishVolumeResponse") + proto.RegisterType((*ControllerUnpublishVolumeRequest)(nil), "csi.v0.ControllerUnpublishVolumeRequest") + proto.RegisterType((*ControllerUnpublishVolumeResponse)(nil), "csi.v0.ControllerUnpublishVolumeResponse") + proto.RegisterType((*ValidateVolumeCapabilitiesRequest)(nil), "csi.v0.ValidateVolumeCapabilitiesRequest") + proto.RegisterType((*ValidateVolumeCapabilitiesResponse)(nil), "csi.v0.ValidateVolumeCapabilitiesResponse") + proto.RegisterType((*ListVolumesRequest)(nil), "csi.v0.ListVolumesRequest") + proto.RegisterType((*ListVolumesResponse)(nil), "csi.v0.ListVolumesResponse") + proto.RegisterType((*ListVolumesResponse_Entry)(nil), "csi.v0.ListVolumesResponse.Entry") + proto.RegisterType((*GetCapacityRequest)(nil), "csi.v0.GetCapacityRequest") + proto.RegisterType((*GetCapacityResponse)(nil), "csi.v0.GetCapacityResponse") + proto.RegisterType((*ControllerGetCapabilitiesRequest)(nil), "csi.v0.ControllerGetCapabilitiesRequest") + proto.RegisterType((*ControllerGetCapabilitiesResponse)(nil), "csi.v0.ControllerGetCapabilitiesResponse") + proto.RegisterType((*ControllerServiceCapability)(nil), "csi.v0.ControllerServiceCapability") + proto.RegisterType((*ControllerServiceCapability_RPC)(nil), "csi.v0.ControllerServiceCapability.RPC") + proto.RegisterType((*NodeStageVolumeRequest)(nil), "csi.v0.NodeStageVolumeRequest") + proto.RegisterType((*NodeStageVolumeResponse)(nil), "csi.v0.NodeStageVolumeResponse") + proto.RegisterType((*NodeUnstageVolumeRequest)(nil), "csi.v0.NodeUnstageVolumeRequest") + proto.RegisterType((*NodeUnstageVolumeResponse)(nil), "csi.v0.NodeUnstageVolumeResponse") + proto.RegisterType((*NodePublishVolumeRequest)(nil), "csi.v0.NodePublishVolumeRequest") + proto.RegisterType((*NodePublishVolumeResponse)(nil), "csi.v0.NodePublishVolumeResponse") + proto.RegisterType((*NodeUnpublishVolumeRequest)(nil), "csi.v0.NodeUnpublishVolumeRequest") + 
proto.RegisterType((*NodeUnpublishVolumeResponse)(nil), "csi.v0.NodeUnpublishVolumeResponse") + proto.RegisterType((*NodeGetIdRequest)(nil), "csi.v0.NodeGetIdRequest") + proto.RegisterType((*NodeGetIdResponse)(nil), "csi.v0.NodeGetIdResponse") + proto.RegisterType((*NodeGetCapabilitiesRequest)(nil), "csi.v0.NodeGetCapabilitiesRequest") + proto.RegisterType((*NodeGetCapabilitiesResponse)(nil), "csi.v0.NodeGetCapabilitiesResponse") + proto.RegisterType((*NodeServiceCapability)(nil), "csi.v0.NodeServiceCapability") + proto.RegisterType((*NodeServiceCapability_RPC)(nil), "csi.v0.NodeServiceCapability.RPC") + proto.RegisterEnum("csi.v0.PluginCapability_Service_Type", PluginCapability_Service_Type_name, PluginCapability_Service_Type_value) + proto.RegisterEnum("csi.v0.VolumeCapability_AccessMode_Mode", VolumeCapability_AccessMode_Mode_name, VolumeCapability_AccessMode_Mode_value) + proto.RegisterEnum("csi.v0.ControllerServiceCapability_RPC_Type", ControllerServiceCapability_RPC_Type_name, ControllerServiceCapability_RPC_Type_value) + proto.RegisterEnum("csi.v0.NodeServiceCapability_RPC_Type", NodeServiceCapability_RPC_Type_name, NodeServiceCapability_RPC_Type_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for Identity service + +type IdentityClient interface { + GetPluginInfo(ctx context.Context, in *GetPluginInfoRequest, opts ...grpc.CallOption) (*GetPluginInfoResponse, error) + GetPluginCapabilities(ctx context.Context, in *GetPluginCapabilitiesRequest, opts ...grpc.CallOption) (*GetPluginCapabilitiesResponse, error) + Probe(ctx context.Context, in *ProbeRequest, opts ...grpc.CallOption) (*ProbeResponse, error) +} + +type identityClient struct { + cc *grpc.ClientConn +} + +func NewIdentityClient(cc *grpc.ClientConn) IdentityClient { + return &identityClient{cc} +} + +func (c *identityClient) GetPluginInfo(ctx context.Context, in *GetPluginInfoRequest, opts ...grpc.CallOption) (*GetPluginInfoResponse, error) { + out := new(GetPluginInfoResponse) + err := grpc.Invoke(ctx, "/csi.v0.Identity/GetPluginInfo", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *identityClient) GetPluginCapabilities(ctx context.Context, in *GetPluginCapabilitiesRequest, opts ...grpc.CallOption) (*GetPluginCapabilitiesResponse, error) { + out := new(GetPluginCapabilitiesResponse) + err := grpc.Invoke(ctx, "/csi.v0.Identity/GetPluginCapabilities", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *identityClient) Probe(ctx context.Context, in *ProbeRequest, opts ...grpc.CallOption) (*ProbeResponse, error) { + out := new(ProbeResponse) + err := grpc.Invoke(ctx, "/csi.v0.Identity/Probe", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Identity service + +type IdentityServer interface { + GetPluginInfo(context.Context, *GetPluginInfoRequest) (*GetPluginInfoResponse, error) + GetPluginCapabilities(context.Context, *GetPluginCapabilitiesRequest) (*GetPluginCapabilitiesResponse, error) + Probe(context.Context, *ProbeRequest) (*ProbeResponse, error) +} + +func RegisterIdentityServer(s *grpc.Server, srv IdentityServer) { + s.RegisterService(&_Identity_serviceDesc, srv) +} + +func _Identity_GetPluginInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetPluginInfoRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IdentityServer).GetPluginInfo(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v0.Identity/GetPluginInfo", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IdentityServer).GetPluginInfo(ctx, req.(*GetPluginInfoRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Identity_GetPluginCapabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetPluginCapabilitiesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IdentityServer).GetPluginCapabilities(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v0.Identity/GetPluginCapabilities", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IdentityServer).GetPluginCapabilities(ctx, req.(*GetPluginCapabilitiesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Identity_Probe_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ProbeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IdentityServer).Probe(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v0.Identity/Probe", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IdentityServer).Probe(ctx, req.(*ProbeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Identity_serviceDesc = grpc.ServiceDesc{ + ServiceName: "csi.v0.Identity", + HandlerType: (*IdentityServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetPluginInfo", + Handler: _Identity_GetPluginInfo_Handler, + }, + { + MethodName: "GetPluginCapabilities", + Handler: _Identity_GetPluginCapabilities_Handler, + }, + { + MethodName: "Probe", + Handler: _Identity_Probe_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "csi.proto", +} + +// Client API for Controller service + +type ControllerClient interface { + CreateVolume(ctx context.Context, in *CreateVolumeRequest, opts ...grpc.CallOption) (*CreateVolumeResponse, error) + DeleteVolume(ctx context.Context, in *DeleteVolumeRequest, opts ...grpc.CallOption) (*DeleteVolumeResponse, error) + ControllerPublishVolume(ctx context.Context, in *ControllerPublishVolumeRequest, opts ...grpc.CallOption) (*ControllerPublishVolumeResponse, error) + ControllerUnpublishVolume(ctx context.Context, in *ControllerUnpublishVolumeRequest, opts ...grpc.CallOption) 
(*ControllerUnpublishVolumeResponse, error) + ValidateVolumeCapabilities(ctx context.Context, in *ValidateVolumeCapabilitiesRequest, opts ...grpc.CallOption) (*ValidateVolumeCapabilitiesResponse, error) + ListVolumes(ctx context.Context, in *ListVolumesRequest, opts ...grpc.CallOption) (*ListVolumesResponse, error) + GetCapacity(ctx context.Context, in *GetCapacityRequest, opts ...grpc.CallOption) (*GetCapacityResponse, error) + ControllerGetCapabilities(ctx context.Context, in *ControllerGetCapabilitiesRequest, opts ...grpc.CallOption) (*ControllerGetCapabilitiesResponse, error) +} + +type controllerClient struct { + cc *grpc.ClientConn +} + +func NewControllerClient(cc *grpc.ClientConn) ControllerClient { + return &controllerClient{cc} +} + +func (c *controllerClient) CreateVolume(ctx context.Context, in *CreateVolumeRequest, opts ...grpc.CallOption) (*CreateVolumeResponse, error) { + out := new(CreateVolumeResponse) + err := grpc.Invoke(ctx, "/csi.v0.Controller/CreateVolume", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) DeleteVolume(ctx context.Context, in *DeleteVolumeRequest, opts ...grpc.CallOption) (*DeleteVolumeResponse, error) { + out := new(DeleteVolumeResponse) + err := grpc.Invoke(ctx, "/csi.v0.Controller/DeleteVolume", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) ControllerPublishVolume(ctx context.Context, in *ControllerPublishVolumeRequest, opts ...grpc.CallOption) (*ControllerPublishVolumeResponse, error) { + out := new(ControllerPublishVolumeResponse) + err := grpc.Invoke(ctx, "/csi.v0.Controller/ControllerPublishVolume", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) ControllerUnpublishVolume(ctx context.Context, in *ControllerUnpublishVolumeRequest, opts ...grpc.CallOption) (*ControllerUnpublishVolumeResponse, error) { + out := new(ControllerUnpublishVolumeResponse) + err := grpc.Invoke(ctx, "/csi.v0.Controller/ControllerUnpublishVolume", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) ValidateVolumeCapabilities(ctx context.Context, in *ValidateVolumeCapabilitiesRequest, opts ...grpc.CallOption) (*ValidateVolumeCapabilitiesResponse, error) { + out := new(ValidateVolumeCapabilitiesResponse) + err := grpc.Invoke(ctx, "/csi.v0.Controller/ValidateVolumeCapabilities", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) ListVolumes(ctx context.Context, in *ListVolumesRequest, opts ...grpc.CallOption) (*ListVolumesResponse, error) { + out := new(ListVolumesResponse) + err := grpc.Invoke(ctx, "/csi.v0.Controller/ListVolumes", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) GetCapacity(ctx context.Context, in *GetCapacityRequest, opts ...grpc.CallOption) (*GetCapacityResponse, error) { + out := new(GetCapacityResponse) + err := grpc.Invoke(ctx, "/csi.v0.Controller/GetCapacity", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) ControllerGetCapabilities(ctx context.Context, in *ControllerGetCapabilitiesRequest, opts ...grpc.CallOption) (*ControllerGetCapabilitiesResponse, error) { + out := new(ControllerGetCapabilitiesResponse) + err := grpc.Invoke(ctx, "/csi.v0.Controller/ControllerGetCapabilities", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Controller service + +type ControllerServer interface { + CreateVolume(context.Context, *CreateVolumeRequest) (*CreateVolumeResponse, error) + DeleteVolume(context.Context, *DeleteVolumeRequest) (*DeleteVolumeResponse, error) + ControllerPublishVolume(context.Context, *ControllerPublishVolumeRequest) (*ControllerPublishVolumeResponse, error) + ControllerUnpublishVolume(context.Context, *ControllerUnpublishVolumeRequest) (*ControllerUnpublishVolumeResponse, error) + ValidateVolumeCapabilities(context.Context, *ValidateVolumeCapabilitiesRequest) (*ValidateVolumeCapabilitiesResponse, error) + ListVolumes(context.Context, *ListVolumesRequest) (*ListVolumesResponse, error) + GetCapacity(context.Context, *GetCapacityRequest) (*GetCapacityResponse, error) + ControllerGetCapabilities(context.Context, *ControllerGetCapabilitiesRequest) (*ControllerGetCapabilitiesResponse, error) +} + +func RegisterControllerServer(s *grpc.Server, srv ControllerServer) { + s.RegisterService(&_Controller_serviceDesc, srv) +} + +func _Controller_CreateVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).CreateVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v0.Controller/CreateVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).CreateVolume(ctx, req.(*CreateVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_DeleteVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).DeleteVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v0.Controller/DeleteVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).DeleteVolume(ctx, req.(*DeleteVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_ControllerPublishVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ControllerPublishVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).ControllerPublishVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v0.Controller/ControllerPublishVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).ControllerPublishVolume(ctx, req.(*ControllerPublishVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_Controller_ControllerUnpublishVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ControllerUnpublishVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).ControllerUnpublishVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v0.Controller/ControllerUnpublishVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).ControllerUnpublishVolume(ctx, req.(*ControllerUnpublishVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_ValidateVolumeCapabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ValidateVolumeCapabilitiesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).ValidateVolumeCapabilities(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v0.Controller/ValidateVolumeCapabilities", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).ValidateVolumeCapabilities(ctx, req.(*ValidateVolumeCapabilitiesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_ListVolumes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListVolumesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).ListVolumes(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v0.Controller/ListVolumes", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).ListVolumes(ctx, req.(*ListVolumesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_GetCapacity_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetCapacityRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).GetCapacity(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v0.Controller/GetCapacity", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).GetCapacity(ctx, req.(*GetCapacityRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_ControllerGetCapabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ControllerGetCapabilitiesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).ControllerGetCapabilities(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v0.Controller/ControllerGetCapabilities", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).ControllerGetCapabilities(ctx, req.(*ControllerGetCapabilitiesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Controller_serviceDesc = grpc.ServiceDesc{ + 
ServiceName: "csi.v0.Controller", + HandlerType: (*ControllerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateVolume", + Handler: _Controller_CreateVolume_Handler, + }, + { + MethodName: "DeleteVolume", + Handler: _Controller_DeleteVolume_Handler, + }, + { + MethodName: "ControllerPublishVolume", + Handler: _Controller_ControllerPublishVolume_Handler, + }, + { + MethodName: "ControllerUnpublishVolume", + Handler: _Controller_ControllerUnpublishVolume_Handler, + }, + { + MethodName: "ValidateVolumeCapabilities", + Handler: _Controller_ValidateVolumeCapabilities_Handler, + }, + { + MethodName: "ListVolumes", + Handler: _Controller_ListVolumes_Handler, + }, + { + MethodName: "GetCapacity", + Handler: _Controller_GetCapacity_Handler, + }, + { + MethodName: "ControllerGetCapabilities", + Handler: _Controller_ControllerGetCapabilities_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "csi.proto", +} + +// Client API for Node service + +type NodeClient interface { + NodeStageVolume(ctx context.Context, in *NodeStageVolumeRequest, opts ...grpc.CallOption) (*NodeStageVolumeResponse, error) + NodeUnstageVolume(ctx context.Context, in *NodeUnstageVolumeRequest, opts ...grpc.CallOption) (*NodeUnstageVolumeResponse, error) + NodePublishVolume(ctx context.Context, in *NodePublishVolumeRequest, opts ...grpc.CallOption) (*NodePublishVolumeResponse, error) + NodeUnpublishVolume(ctx context.Context, in *NodeUnpublishVolumeRequest, opts ...grpc.CallOption) (*NodeUnpublishVolumeResponse, error) + NodeGetId(ctx context.Context, in *NodeGetIdRequest, opts ...grpc.CallOption) (*NodeGetIdResponse, error) + NodeGetCapabilities(ctx context.Context, in *NodeGetCapabilitiesRequest, opts ...grpc.CallOption) (*NodeGetCapabilitiesResponse, error) +} + +type nodeClient struct { + cc *grpc.ClientConn +} + +func NewNodeClient(cc *grpc.ClientConn) NodeClient { + return &nodeClient{cc} +} + +func (c *nodeClient) NodeStageVolume(ctx context.Context, in *NodeStageVolumeRequest, opts ...grpc.CallOption) (*NodeStageVolumeResponse, error) { + out := new(NodeStageVolumeResponse) + err := grpc.Invoke(ctx, "/csi.v0.Node/NodeStageVolume", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *nodeClient) NodeUnstageVolume(ctx context.Context, in *NodeUnstageVolumeRequest, opts ...grpc.CallOption) (*NodeUnstageVolumeResponse, error) { + out := new(NodeUnstageVolumeResponse) + err := grpc.Invoke(ctx, "/csi.v0.Node/NodeUnstageVolume", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *nodeClient) NodePublishVolume(ctx context.Context, in *NodePublishVolumeRequest, opts ...grpc.CallOption) (*NodePublishVolumeResponse, error) { + out := new(NodePublishVolumeResponse) + err := grpc.Invoke(ctx, "/csi.v0.Node/NodePublishVolume", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *nodeClient) NodeUnpublishVolume(ctx context.Context, in *NodeUnpublishVolumeRequest, opts ...grpc.CallOption) (*NodeUnpublishVolumeResponse, error) { + out := new(NodeUnpublishVolumeResponse) + err := grpc.Invoke(ctx, "/csi.v0.Node/NodeUnpublishVolume", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *nodeClient) NodeGetId(ctx context.Context, in *NodeGetIdRequest, opts ...grpc.CallOption) (*NodeGetIdResponse, error) { + out := new(NodeGetIdResponse) + err := grpc.Invoke(ctx, "/csi.v0.Node/NodeGetId", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *nodeClient) NodeGetCapabilities(ctx context.Context, in *NodeGetCapabilitiesRequest, opts ...grpc.CallOption) (*NodeGetCapabilitiesResponse, error) { + out := new(NodeGetCapabilitiesResponse) + err := grpc.Invoke(ctx, "/csi.v0.Node/NodeGetCapabilities", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Node service + +type NodeServer interface { + NodeStageVolume(context.Context, *NodeStageVolumeRequest) (*NodeStageVolumeResponse, error) + NodeUnstageVolume(context.Context, *NodeUnstageVolumeRequest) (*NodeUnstageVolumeResponse, error) + NodePublishVolume(context.Context, *NodePublishVolumeRequest) (*NodePublishVolumeResponse, error) + NodeUnpublishVolume(context.Context, *NodeUnpublishVolumeRequest) (*NodeUnpublishVolumeResponse, error) + NodeGetId(context.Context, *NodeGetIdRequest) (*NodeGetIdResponse, error) + NodeGetCapabilities(context.Context, *NodeGetCapabilitiesRequest) (*NodeGetCapabilitiesResponse, error) +} + +func RegisterNodeServer(s *grpc.Server, srv NodeServer) { + s.RegisterService(&_Node_serviceDesc, srv) +} + +func _Node_NodeStageVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodeStageVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).NodeStageVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v0.Node/NodeStageVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeServer).NodeStageVolume(ctx, req.(*NodeStageVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Node_NodeUnstageVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodeUnstageVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).NodeUnstageVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v0.Node/NodeUnstageVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeServer).NodeUnstageVolume(ctx, req.(*NodeUnstageVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Node_NodePublishVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodePublishVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).NodePublishVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v0.Node/NodePublishVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeServer).NodePublishVolume(ctx, req.(*NodePublishVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Node_NodeUnpublishVolume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodeUnpublishVolumeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).NodeUnpublishVolume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + 
FullMethod: "/csi.v0.Node/NodeUnpublishVolume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeServer).NodeUnpublishVolume(ctx, req.(*NodeUnpublishVolumeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Node_NodeGetId_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodeGetIdRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).NodeGetId(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v0.Node/NodeGetId", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeServer).NodeGetId(ctx, req.(*NodeGetIdRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Node_NodeGetCapabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodeGetCapabilitiesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).NodeGetCapabilities(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/csi.v0.Node/NodeGetCapabilities", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeServer).NodeGetCapabilities(ctx, req.(*NodeGetCapabilitiesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Node_serviceDesc = grpc.ServiceDesc{ + ServiceName: "csi.v0.Node", + HandlerType: (*NodeServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "NodeStageVolume", + Handler: _Node_NodeStageVolume_Handler, + }, + { + MethodName: "NodeUnstageVolume", + Handler: _Node_NodeUnstageVolume_Handler, + }, + { + MethodName: "NodePublishVolume", + Handler: _Node_NodePublishVolume_Handler, + }, + { + MethodName: "NodeUnpublishVolume", + Handler: _Node_NodeUnpublishVolume_Handler, + }, + { + MethodName: "NodeGetId", + Handler: _Node_NodeGetId_Handler, + }, + { + MethodName: "NodeGetCapabilities", + Handler: _Node_NodeGetCapabilities_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "csi.proto", +} + +func init() { proto.RegisterFile("csi.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 2173 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x1a, 0x4d, 0x53, 0xe3, 0xc8, + 0x15, 0xf9, 0x0b, 0x78, 0x06, 0xd6, 0xd3, 0x30, 0x83, 0x11, 0x30, 0x03, 0x62, 0x67, 0x96, 0xfd, + 0xf2, 0x26, 0x24, 0xd9, 0xda, 0xec, 0x90, 0x49, 0xc0, 0x28, 0xe0, 0xc5, 0x18, 0x4a, 0x18, 0xa6, + 0x76, 0xb2, 0x29, 0xad, 0xb0, 0x1b, 0x46, 0x19, 0x23, 0x79, 0x24, 0x99, 0x1a, 0x6e, 0xa9, 0x5c, + 0x52, 0x95, 0x5b, 0xee, 0x49, 0xa5, 0x2a, 0xc7, 0x9c, 0x73, 0x48, 0x55, 0xaa, 0xe6, 0x9c, 0xaa, + 0xfc, 0x81, 0x9c, 0xf2, 0x03, 0x52, 0x39, 0xe6, 0x94, 0x53, 0x4a, 0xdd, 0x2d, 0xb9, 0x25, 0xb7, + 0x64, 0x7b, 0x67, 0x6a, 0x2b, 0x27, 0xac, 0xf7, 0xdd, 0xaf, 0xdf, 0x7b, 0xfd, 0x5e, 0x37, 0x30, + 0xdd, 0x72, 0xcd, 0x4a, 0xd7, 0xb1, 0x3d, 0x1b, 0x15, 0xfc, 0x9f, 0x37, 0xdf, 0x51, 0xee, 0xc1, + 0xc2, 0x3e, 0xf6, 0x4e, 0x3a, 0xbd, 0x2b, 0xd3, 0xaa, 0x59, 0x97, 0xb6, 0x86, 0x5f, 0xf6, 0xb0, + 0xeb, 0x29, 0xff, 0x90, 0xe0, 0x6e, 0x0c, 0xe1, 0x76, 0x6d, 0xcb, 0xc5, 0x08, 0x41, 0xce, 0x32, + 0xae, 0x71, 0x59, 0x5a, 0x93, 0x36, 0xa7, 0x35, 0xf2, 0x1b, 0x3d, 0x84, 0xb9, 0x1b, 0x6c, 0xb5, + 0x6d, 0x47, 0xbf, 0xc1, 0x8e, 0x6b, 0xda, 0x56, 
0x39, 0x43, 0xb0, 0xb3, 0x14, 0x7a, 0x4e, 0x81, + 0x68, 0x1f, 0xa6, 0xae, 0x0d, 0xcb, 0xbc, 0xc4, 0xae, 0x57, 0xce, 0xae, 0x65, 0x37, 0x8b, 0x5b, + 0x1f, 0x56, 0xa8, 0x1d, 0x15, 0xa1, 0xae, 0xca, 0x11, 0xa3, 0x56, 0x2d, 0xcf, 0xb9, 0xd5, 0x42, + 0x66, 0xf9, 0x31, 0xcc, 0x46, 0x50, 0xa8, 0x04, 0xd9, 0x17, 0xf8, 0x96, 0xd9, 0xe4, 0xff, 0x44, + 0x0b, 0x90, 0xbf, 0x31, 0x3a, 0x3d, 0xcc, 0x2c, 0xa1, 0x1f, 0x9f, 0x67, 0x3e, 0x93, 0x94, 0xfb, + 0xb0, 0x12, 0x6a, 0xab, 0x1a, 0x5d, 0xe3, 0xc2, 0xec, 0x98, 0x9e, 0x89, 0xdd, 0x60, 0xe9, 0x3f, + 0x87, 0xd5, 0x04, 0x3c, 0xf3, 0xc0, 0x36, 0xcc, 0xb4, 0x38, 0x78, 0x39, 0x43, 0x96, 0x52, 0x0e, + 0x96, 0x12, 0xe3, 0xbc, 0xd5, 0x22, 0xd4, 0xca, 0xdf, 0x25, 0x28, 0xc5, 0x49, 0xd0, 0x36, 0x4c, + 0xba, 0xd8, 0xb9, 0x31, 0x5b, 0xd4, 0xaf, 0xc5, 0xad, 0xb5, 0x24, 0x69, 0x95, 0x53, 0x4a, 0x77, + 0x30, 0xa1, 0x05, 0x2c, 0xf2, 0x4b, 0x98, 0x64, 0x50, 0xf4, 0x43, 0xc8, 0x79, 0xb7, 0x5d, 0x2a, + 0x65, 0x6e, 0xeb, 0xe1, 0x30, 0x29, 0x95, 0xe6, 0x6d, 0x17, 0x6b, 0x84, 0x45, 0xf9, 0x10, 0x72, + 0xfe, 0x17, 0x2a, 0xc2, 0xe4, 0x59, 0xe3, 0xb0, 0x71, 0xfc, 0xb4, 0x51, 0x9a, 0x40, 0xf7, 0x00, + 0x55, 0x8f, 0x1b, 0x4d, 0xed, 0xb8, 0x5e, 0x57, 0x35, 0xfd, 0x54, 0xd5, 0xce, 0x6b, 0x55, 0xb5, + 0x24, 0xed, 0x16, 0xa8, 0x1e, 0x65, 0x0e, 0x66, 0x4e, 0x1c, 0xfb, 0x02, 0x07, 0xce, 0x7b, 0x07, + 0x66, 0xd9, 0x37, 0x75, 0x96, 0xf2, 0xcb, 0x1c, 0xcc, 0x57, 0x1d, 0x6c, 0x78, 0xf8, 0xdc, 0xee, + 0xf4, 0xae, 0x03, 0x42, 0x61, 0x18, 0x6d, 0xc3, 0x9c, 0xef, 0xaa, 0x96, 0xe9, 0xdd, 0xea, 0x8e, + 0x61, 0x5d, 0xd1, 0xcd, 0x2b, 0x6e, 0xdd, 0x0d, 0x96, 0x51, 0x65, 0x58, 0xcd, 0x47, 0x6a, 0xb3, + 0x2d, 0xfe, 0x13, 0xd5, 0x60, 0xfe, 0x86, 0xa8, 0xd0, 0x23, 0xbb, 0x93, 0x8d, 0xee, 0x0e, 0xb5, + 0x82, 0xdb, 0x1d, 0x74, 0x13, 0x85, 0x98, 0xd8, 0x45, 0x87, 0x00, 0x5d, 0xc3, 0x31, 0xae, 0xb1, + 0x87, 0x1d, 0xb7, 0x9c, 0x8b, 0x86, 0xaa, 0x60, 0x35, 0x95, 0x93, 0x90, 0x9a, 0x86, 0x2a, 0xc7, + 0x8e, 0x3c, 0x58, 0x6a, 0xd9, 0x96, 0xe7, 0xd8, 0x9d, 0x0e, 0x76, 0xf4, 0x16, 0xe1, 0xd6, 0x5d, + 0xdc, 0x72, 0xb0, 0xe7, 0x96, 0xf3, 0x44, 0xf6, 0x67, 0x69, 0xb2, 0xab, 0x21, 0x33, 0xc5, 0x9e, + 0x52, 0x56, 0xaa, 0x68, 0xb1, 0x25, 0xc6, 0xca, 0x3f, 0x82, 0x77, 0x62, 0x46, 0x8d, 0x93, 0x24, + 0xf2, 0x17, 0xb0, 0x92, 0xa6, 0x77, 0xac, 0x84, 0x7b, 0x02, 0x0b, 0xd1, 0x75, 0xb1, 0x3c, 0x7a, + 0x04, 0x05, 0xea, 0x7b, 0x16, 0xf3, 0x73, 0xd1, 0x3d, 0xd2, 0x18, 0x56, 0xf9, 0x53, 0x0e, 0x4a, + 0xf1, 0x6d, 0x43, 0xdb, 0x90, 0xbf, 0xe8, 0xd8, 0xad, 0x17, 0x8c, 0xf7, 0xdd, 0xa4, 0xfd, 0xad, + 0xec, 0xfa, 0x54, 0x14, 0x7a, 0x30, 0xa1, 0x51, 0x26, 0x9f, 0xfb, 0xda, 0xee, 0x59, 0x1e, 0x0b, + 0xb0, 0x64, 0xee, 0x23, 0x9f, 0xaa, 0xcf, 0x4d, 0x98, 0xd0, 0x1e, 0x14, 0x8d, 0x56, 0x0b, 0xbb, + 0xae, 0x7e, 0x6d, 0xb7, 0x71, 0x39, 0x4b, 0x64, 0x6c, 0x24, 0xca, 0xd8, 0x21, 0xb4, 0x47, 0x76, + 0x1b, 0x6b, 0x60, 0x84, 0xbf, 0xe5, 0x59, 0x28, 0x72, 0xb6, 0xc9, 0xfb, 0x50, 0xe4, 0x94, 0xa1, + 0x45, 0x98, 0xbc, 0x74, 0xf5, 0x30, 0x97, 0xa7, 0xb5, 0xc2, 0xa5, 0x4b, 0xd2, 0xf3, 0x01, 0x14, + 0x89, 0x15, 0xfa, 0x65, 0xc7, 0xb8, 0xa2, 0xc5, 0x67, 0x5a, 0x03, 0x02, 0xfa, 0xa9, 0x0f, 0x91, + 0xff, 0x2d, 0x01, 0xf4, 0x55, 0xa2, 0x6d, 0xc8, 0x11, 0x2b, 0x69, 0x45, 0xd8, 0x1c, 0xc1, 0xca, + 0x0a, 0x31, 0x95, 0x70, 0x29, 0x7f, 0x90, 0x20, 0x47, 0xc4, 0xc4, 0xab, 0xc2, 0x69, 0xad, 0xb1, + 0x5f, 0x57, 0xf5, 0xc6, 0xf1, 0x9e, 0xaa, 0x3f, 0xd5, 0x6a, 0x4d, 0x55, 0x2b, 0x49, 0x68, 0x19, + 0x16, 0x79, 0xb8, 0xa6, 0xee, 0xec, 0xa9, 0x9a, 0x7e, 0xdc, 0xa8, 0x7f, 0x59, 0xca, 0x20, 0x19, + 0xee, 0x1d, 0x9d, 0xd5, 0x9b, 0xb5, 0x41, 0x5c, 0x16, 0xad, 0x40, 0x99, 
0xc3, 0x31, 0x19, 0x4c, + 0x6c, 0xce, 0x17, 0xcb, 0x61, 0xe9, 0x4f, 0x86, 0xcc, 0xef, 0xce, 0x86, 0x9b, 0x41, 0x0a, 0xd2, + 0x53, 0x98, 0x8d, 0x54, 0x09, 0xff, 0x6c, 0x72, 0xf0, 0xcb, 0x9e, 0xe9, 0xe0, 0xb6, 0x7e, 0x71, + 0xeb, 0x61, 0x97, 0x78, 0x22, 0xab, 0xcd, 0x06, 0xd0, 0x5d, 0x1f, 0xe8, 0xbb, 0xb5, 0x63, 0x5e, + 0x9b, 0x1e, 0xa3, 0xc9, 0x10, 0x1a, 0x20, 0x20, 0x42, 0xa0, 0xbc, 0x96, 0xa0, 0xc0, 0xf6, 0xe6, + 0x21, 0x57, 0xa7, 0x22, 0x22, 0x03, 0x28, 0x15, 0x39, 0x07, 0x19, 0xb3, 0xcd, 0xd2, 0x21, 0x63, + 0xb6, 0xd1, 0x13, 0x00, 0xc3, 0xf3, 0x1c, 0xf3, 0xa2, 0xe7, 0x85, 0x75, 0xe9, 0x7e, 0x74, 0x3f, + 0x2a, 0x3b, 0x21, 0x01, 0x2b, 0x24, 0x7d, 0x0e, 0x3f, 0xa5, 0x63, 0xe8, 0xb1, 0xd2, 0xf0, 0x3f, + 0x12, 0xcc, 0xef, 0xe1, 0x0e, 0x8e, 0x57, 0xe2, 0x65, 0x98, 0x66, 0x75, 0xd3, 0x6c, 0x33, 0x49, + 0x53, 0x14, 0x50, 0x6b, 0xc7, 0x8a, 0x57, 0x9b, 0xb0, 0x87, 0xc5, 0x2b, 0x13, 0x2d, 0x5e, 0x02, + 0xe1, 0x5c, 0xf1, 0xa2, 0xd8, 0xa4, 0xe2, 0x15, 0xc1, 0x46, 0xab, 0xcf, 0x20, 0xe3, 0x58, 0xcb, + 0xbe, 0x07, 0x0b, 0x51, 0xc3, 0xd8, 0xc1, 0xf4, 0xd7, 0x1c, 0xdc, 0xef, 0x2b, 0x39, 0xe9, 0x5d, + 0x74, 0x4c, 0xf7, 0xf9, 0x18, 0x9e, 0x59, 0x84, 0x49, 0xcb, 0x6e, 0x13, 0x14, 0xd5, 0x59, 0xf0, + 0x3f, 0x6b, 0x6d, 0xa4, 0xc2, 0x9d, 0xf8, 0x39, 0x74, 0xcb, 0x6a, 0x44, 0xf2, 0x29, 0x54, 0xba, + 0x89, 0x17, 0x38, 0x19, 0xa6, 0x1c, 0x6c, 0xb4, 0x6d, 0xab, 0x73, 0x5b, 0xce, 0xad, 0x49, 0x9b, + 0x53, 0x5a, 0xf8, 0x8d, 0x7e, 0x25, 0x81, 0xcc, 0x6d, 0x4b, 0x97, 0x1a, 0x1f, 0x3b, 0x54, 0xf6, + 0xc2, 0x43, 0x25, 0x75, 0x95, 0x83, 0xe8, 0xc8, 0x1e, 0x95, 0x5b, 0x09, 0x68, 0x64, 0x86, 0xeb, + 0xe4, 0xa2, 0xba, 0x40, 0x54, 0x6f, 0x8f, 0xa8, 0x9a, 0x7e, 0xc5, 0x63, 0x9e, 0xf9, 0xa2, 0x0f, + 0x96, 0x0f, 0x61, 0x35, 0xd5, 0xca, 0xb1, 0x8e, 0xb6, 0x2a, 0xdc, 0x15, 0xea, 0x1d, 0x2b, 0xaa, + 0x5e, 0x4b, 0xf0, 0x20, 0x71, 0x71, 0xec, 0x7c, 0xfb, 0x19, 0xcc, 0x04, 0x3b, 0x63, 0x5a, 0x97, + 0x76, 0x59, 0x8a, 0x9d, 0xf5, 0xe9, 0xec, 0x15, 0x06, 0xf5, 0x1b, 0x62, 0xea, 0x97, 0x62, 0xb7, + 0x0f, 0x91, 0x9f, 0x40, 0x29, 0x4e, 0x30, 0xd6, 0x02, 0xfe, 0x9c, 0x81, 0xb5, 0xbe, 0x05, 0x67, + 0x56, 0xf7, 0xed, 0x25, 0xc0, 0x6f, 0x24, 0x58, 0xe1, 0xa2, 0xb3, 0x67, 0xc5, 0xe3, 0x93, 0x96, + 0xbe, 0x83, 0x41, 0x47, 0x88, 0xcd, 0x10, 0x11, 0x44, 0x62, 0x94, 0xcb, 0x85, 0x38, 0x81, 0x7c, + 0xc4, 0xef, 0x93, 0x90, 0x7d, 0x2c, 0xb7, 0x6d, 0xc0, 0x7a, 0x8a, 0xb9, 0xac, 0xb4, 0xbc, 0xce, + 0xc0, 0xfa, 0xb9, 0xd1, 0x31, 0xdb, 0x61, 0xcf, 0x23, 0x98, 0x33, 0xd2, 0x9d, 0x9b, 0xd0, 0xcc, + 0x66, 0xbe, 0x41, 0x33, 0xdb, 0x11, 0xe5, 0x29, 0xdd, 0x82, 0x1f, 0x87, 0x82, 0x86, 0x59, 0x3b, + 0x72, 0xaa, 0xbe, 0x95, 0xec, 0xfa, 0x0a, 0x94, 0x34, 0x8b, 0x58, 0x7e, 0xad, 0xc0, 0xb4, 0xdb, + 0xeb, 0x76, 0x6d, 0xc7, 0xc3, 0xd4, 0x81, 0x53, 0x5a, 0x1f, 0x80, 0xca, 0x30, 0x79, 0x8d, 0x5d, + 0xd7, 0xb8, 0x0a, 0xe4, 0x07, 0x9f, 0xca, 0x57, 0x80, 0xea, 0xa6, 0xcb, 0x1a, 0xad, 0x70, 0x3b, + 0xfc, 0xbe, 0xca, 0x78, 0xa5, 0x63, 0xcb, 0x73, 0x4c, 0x76, 0xa2, 0xe7, 0x35, 0xb8, 0x36, 0x5e, + 0xa9, 0x14, 0xe2, 0x9f, 0xfa, 0xae, 0x67, 0x38, 0x9e, 0x69, 0x5d, 0xe9, 0x9e, 0xfd, 0x02, 0x87, + 0x43, 0x6e, 0x00, 0x6d, 0xfa, 0x40, 0xe5, 0x8f, 0x12, 0xcc, 0x47, 0xc4, 0x33, 0x6b, 0x1f, 0xc3, + 0x64, 0x5f, 0xb6, 0xef, 0xfc, 0xf5, 0xc0, 0xf9, 0x02, 0xea, 0x0a, 0x75, 0x6f, 0xc0, 0x81, 0x56, + 0x01, 0x2c, 0xfc, 0xca, 0x8b, 0xe8, 0x9d, 0xf6, 0x21, 0x44, 0xa7, 0xfc, 0x09, 0xe4, 0xa9, 0x93, + 0x47, 0x6d, 0xa9, 0xff, 0x25, 0x01, 0xda, 0xc7, 0x5e, 0xd8, 0x29, 0x31, 0x1f, 0x24, 0x44, 0x9d, + 0xf4, 0x0d, 0xa2, 0xee, 0x8b, 0xc8, 0x08, 0x45, 0xe3, 0xf6, 0x03, 0x6e, 0xda, 0x8f, 0xa9, 0x4e, + 
0x9b, 0xa0, 0xde, 0x70, 0x96, 0x51, 0xf6, 0x60, 0x3e, 0xa2, 0x90, 0x6d, 0xc8, 0xc7, 0x80, 0x8c, + 0x1b, 0xc3, 0xec, 0x18, 0x17, 0x1d, 0xba, 0x5e, 0x1f, 0xcb, 0x3a, 0xb9, 0x3b, 0x21, 0x26, 0x60, + 0x53, 0x14, 0xbe, 0x5e, 0x32, 0x79, 0xf1, 0xab, 0x83, 0x0e, 0x5f, 0x1d, 0x06, 0x68, 0x98, 0xde, + 0x7d, 0xe1, 0xf5, 0xc1, 0xc6, 0x60, 0x35, 0x64, 0x33, 0x7a, 0xe2, 0x4d, 0xc2, 0xef, 0x33, 0xb0, + 0x9c, 0x42, 0x8d, 0x1e, 0x43, 0xd6, 0xe9, 0xb6, 0x58, 0x24, 0xbc, 0x37, 0x82, 0xfc, 0x8a, 0x76, + 0x52, 0x3d, 0x98, 0xd0, 0x7c, 0x2e, 0xf9, 0x2f, 0x12, 0x64, 0xb5, 0x93, 0x2a, 0xfa, 0x49, 0xe4, + 0x42, 0xe1, 0xa3, 0x11, 0xa5, 0xf0, 0xf7, 0x0a, 0xb6, 0xe8, 0x5e, 0xa1, 0x0c, 0x0b, 0x55, 0x4d, + 0xdd, 0x69, 0xaa, 0xfa, 0x9e, 0x5a, 0x57, 0x9b, 0xaa, 0x7e, 0x7e, 0x5c, 0x3f, 0x3b, 0x52, 0x4b, + 0x92, 0x3f, 0x0a, 0x9c, 0x9c, 0xed, 0xd6, 0x6b, 0xa7, 0x07, 0xfa, 0x59, 0x23, 0xf8, 0xc5, 0xb0, + 0x19, 0x54, 0x82, 0x99, 0x7a, 0xed, 0xb4, 0xc9, 0x00, 0xa7, 0xa5, 0xac, 0x0f, 0xd9, 0x57, 0x9b, + 0x7a, 0x75, 0xe7, 0x64, 0xa7, 0x5a, 0x6b, 0x7e, 0x59, 0xca, 0x85, 0x77, 0x13, 0xbf, 0xcd, 0xc3, + 0xbd, 0x86, 0xdd, 0xc6, 0xa7, 0x9e, 0x71, 0x35, 0x4e, 0xcf, 0xab, 0xc5, 0xce, 0x6d, 0xba, 0x41, + 0x9f, 0x04, 0x4b, 0x17, 0x8b, 0x4c, 0x3f, 0xae, 0x51, 0x05, 0xe6, 0x5d, 0xcf, 0xb8, 0x22, 0xb5, + 0xc3, 0x70, 0xae, 0xb0, 0xa7, 0x77, 0x0d, 0xef, 0x39, 0x69, 0x0b, 0xa7, 0xb5, 0x3b, 0x0c, 0xd5, + 0x24, 0x98, 0x13, 0xc3, 0x7b, 0x2e, 0x6e, 0x22, 0x73, 0x63, 0x37, 0x91, 0x17, 0x80, 0xc8, 0x19, + 0xed, 0x2b, 0x88, 0x5f, 0x3a, 0x7c, 0x7f, 0xc8, 0x82, 0x42, 0x70, 0xe4, 0xac, 0x2d, 0x59, 0x31, + 0x30, 0x32, 0x92, 0xfb, 0xc0, 0x61, 0x2a, 0x46, 0x3d, 0x54, 0xde, 0xb0, 0xd9, 0xf1, 0x0f, 0x25, + 0xe1, 0x6a, 0xbe, 0xfd, 0xbe, 0x71, 0x09, 0x16, 0x07, 0x7c, 0xc1, 0xba, 0x86, 0x2b, 0x28, 0xfb, + 0xa8, 0x33, 0xcb, 0x1d, 0x33, 0x5e, 0x13, 0x62, 0x2b, 0x93, 0x10, 0x5b, 0xca, 0x32, 0x2c, 0x09, + 0x14, 0x31, 0x2b, 0xfe, 0x99, 0xa7, 0x66, 0x8c, 0x3f, 0x10, 0x35, 0x85, 0x69, 0xf3, 0x5d, 0x3e, + 0x04, 0x84, 0x43, 0xc0, 0xdb, 0x4d, 0x9c, 0x07, 0x50, 0xe4, 0xe9, 0x72, 0x84, 0x0e, 0xbc, 0x21, + 0x99, 0x95, 0x7f, 0xa3, 0xf1, 0xac, 0x10, 0x1b, 0xcf, 0x7e, 0x01, 0x0b, 0x24, 0xeb, 0xe2, 0x7d, + 0xef, 0x64, 0x74, 0x00, 0x48, 0xf4, 0x08, 0x87, 0x88, 0xe4, 0x1e, 0xc9, 0xe5, 0xd8, 0x14, 0xd6, + 0x12, 0x65, 0xdf, 0x14, 0x51, 0xf4, 0xe9, 0x50, 0x45, 0xdf, 0x56, 0xfe, 0xa9, 0x34, 0xea, 0xff, + 0x2f, 0x26, 0x37, 0x16, 0xfd, 0xc2, 0x99, 0x4b, 0x79, 0x06, 0x32, 0x4d, 0x8d, 0xf1, 0xc7, 0xa1, + 0x58, 0xe0, 0x65, 0xe2, 0x81, 0xa7, 0xac, 0xc2, 0xb2, 0x50, 0x36, 0x53, 0x8d, 0xa0, 0xe4, 0xa3, + 0xf7, 0xb1, 0x57, 0x6b, 0x07, 0xfd, 0xc4, 0x47, 0x70, 0x87, 0x83, 0xb1, 0xfe, 0x81, 0x9b, 0xbb, + 0x24, 0x7e, 0xee, 0x52, 0x56, 0xa8, 0xf1, 0x09, 0xbd, 0xc9, 0xd7, 0x54, 0x7d, 0x52, 0x57, 0xb2, + 0x13, 0xeb, 0x4a, 0x68, 0xcf, 0xb7, 0x1a, 0x29, 0xe0, 0x43, 0xfa, 0x91, 0xbf, 0x49, 0xac, 0xcc, + 0x0e, 0x74, 0x22, 0x3f, 0xe0, 0x3b, 0x91, 0xf5, 0x54, 0x99, 0x7c, 0x0f, 0xd2, 0xa5, 0x2d, 0xc8, + 0xe7, 0x91, 0x16, 0xe4, 0xd1, 0x50, 0x76, 0xbe, 0xf9, 0xf8, 0x38, 0xa1, 0xf9, 0x38, 0x6d, 0xee, + 0xec, 0xab, 0xfa, 0x59, 0x83, 0xfe, 0x0d, 0x9a, 0x8f, 0xa0, 0x75, 0xd8, 0xfa, 0xaf, 0x04, 0x53, + 0xb5, 0x36, 0xb6, 0x3c, 0xdf, 0xfa, 0x06, 0xcc, 0x46, 0x9e, 0xa7, 0xd0, 0x4a, 0xc2, 0xab, 0x15, + 0x71, 0xb4, 0xbc, 0x9a, 0xfa, 0xa6, 0xa5, 0x4c, 0xa0, 0x4b, 0xee, 0x69, 0x2d, 0xd2, 0x33, 0xbf, + 0x3b, 0xc0, 0x29, 0xd8, 0x48, 0xf9, 0xe1, 0x10, 0xaa, 0x50, 0xcf, 0xa7, 0x90, 0x27, 0x6f, 0x31, + 0x68, 0x21, 0x7c, 0x06, 0xe2, 0x9e, 0x6a, 0xe4, 0xbb, 0x31, 0x68, 0xc0, 0xb7, 0xf5, 0xeb, 0x02, + 0x40, 0xbf, 0xbf, 0x43, 
0x87, 0x30, 0xc3, 0x5f, 0xdf, 0xa3, 0xe5, 0x94, 0xc7, 0x0a, 0x79, 0x45, + 0x8c, 0x0c, 0x6d, 0x3a, 0x84, 0x19, 0xfe, 0x36, 0xae, 0x2f, 0x4c, 0x70, 0x79, 0xd8, 0x17, 0x26, + 0xbc, 0xc0, 0x9b, 0x40, 0x1d, 0x58, 0x4c, 0xb8, 0x44, 0x41, 0x8f, 0x46, 0xbb, 0x81, 0x92, 0xdf, + 0x1b, 0xf1, 0x36, 0x46, 0x99, 0x40, 0x0e, 0x2c, 0x25, 0x8e, 0xfe, 0x68, 0x73, 0xd4, 0xcb, 0x0c, + 0xf9, 0xfd, 0x11, 0x28, 0x43, 0x9d, 0x3d, 0x90, 0x93, 0x07, 0x61, 0xf4, 0xfe, 0xc8, 0xe3, 0xbb, + 0xfc, 0xc1, 0x28, 0xa4, 0xa1, 0xda, 0x03, 0x28, 0x72, 0x43, 0x29, 0x92, 0x85, 0x93, 0x2a, 0x15, + 0xbc, 0x9c, 0x32, 0xc5, 0x52, 0x49, 0xdc, 0xec, 0xd5, 0x97, 0x34, 0x38, 0x01, 0xf6, 0x25, 0x09, + 0x86, 0xb5, 0xb8, 0xfb, 0x63, 0x55, 0x4c, 0xe4, 0x7e, 0x71, 0x19, 0x14, 0xb9, 0x3f, 0xa1, 0x24, + 0x2a, 0x13, 0x5b, 0xbf, 0xcb, 0x41, 0xce, 0x2f, 0x33, 0xa8, 0x09, 0xef, 0xc4, 0xda, 0x36, 0x74, + 0x3f, 0xbd, 0xb7, 0x95, 0x1f, 0x24, 0xe2, 0xc3, 0x25, 0x3d, 0xa3, 0xe5, 0x3d, 0xd2, 0x88, 0xa1, + 0x35, 0x9e, 0x4f, 0xd4, 0x0c, 0xca, 0xeb, 0x29, 0x14, 0x71, 0xd9, 0xd1, 0xac, 0x58, 0x1b, 0xd6, + 0x11, 0x44, 0x65, 0x27, 0x65, 0xc2, 0xd7, 0x30, 0x2f, 0x38, 0xc9, 0x90, 0x12, 0xb5, 0x4b, 0x18, + 0xfd, 0x1b, 0xa9, 0x34, 0xa1, 0x86, 0x5d, 0x98, 0x0e, 0x0f, 0x3e, 0x54, 0xe6, 0x79, 0xf8, 0xf3, + 0x51, 0x5e, 0x12, 0x60, 0xe2, 0x56, 0xc6, 0x43, 0x45, 0x89, 0xf1, 0x88, 0x82, 0x64, 0x23, 0x95, + 0x26, 0xd0, 0xb0, 0x9b, 0x7f, 0x96, 0x6d, 0xb9, 0xe6, 0x45, 0x81, 0xfc, 0x4b, 0xc5, 0xf7, 0xfe, + 0x17, 0x00, 0x00, 0xff, 0xff, 0x69, 0xaa, 0xdb, 0x41, 0x5f, 0x21, 0x00, 0x00, +} diff --git a/vendor/github.com/containerd/console/.travis.yml b/vendor/github.com/containerd/console/.travis.yml new file mode 100644 index 000000000000..ba93012c7676 --- /dev/null +++ b/vendor/github.com/containerd/console/.travis.yml @@ -0,0 +1,17 @@ +language: go +go: + - 1.9.x + - tip + +go_import_path: github.com/containerd/console + +install: + - go get -d + - GOOS=windows go get -d + - GOOS=solaris go get -d + +script: + - go test -race + - GOOS=windows go test + - GOOS=solaris go build + - GOOS=solaris go test -c diff --git a/vendor/github.com/containerd/console/LICENSE b/vendor/github.com/containerd/console/LICENSE new file mode 100644 index 000000000000..261eeb9e9f8b --- /dev/null +++ b/vendor/github.com/containerd/console/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/containerd/console/README.md b/vendor/github.com/containerd/console/README.md new file mode 100644 index 000000000000..4c56d9d134a9 --- /dev/null +++ b/vendor/github.com/containerd/console/README.md @@ -0,0 +1,17 @@ +# console + +[![Build Status](https://travis-ci.org/containerd/console.svg?branch=master)](https://travis-ci.org/containerd/console) + +Golang package for dealing with consoles. Light on deps and a simple API. 
+
+## Modifying the current process
+
+```go
+current := console.Current()
+defer current.Reset()
+
+if err := current.SetRaw(); err != nil {
+	// handle the error: the console could not enter raw mode
+}
+ws, err := current.Size()
+current.Resize(ws)
+```
diff --git a/vendor/github.com/containerd/console/console.go b/vendor/github.com/containerd/console/console.go
new file mode 100644
index 000000000000..bf2798fda37d
--- /dev/null
+++ b/vendor/github.com/containerd/console/console.go
@@ -0,0 +1,62 @@
+package console
+
+import (
+	"errors"
+	"io"
+	"os"
+)
+
+var ErrNotAConsole = errors.New("provided file is not a console")
+
+type Console interface {
+	io.Reader
+	io.Writer
+	io.Closer
+
+	// Resize resizes the console to the provided window size
+	Resize(WinSize) error
+	// ResizeFrom resizes the calling console to the size of the
+	// provided console
+	ResizeFrom(Console) error
+	// SetRaw sets the console in raw mode
+	SetRaw() error
+	// DisableEcho disables echo on the console
+	DisableEcho() error
+	// Reset restores the console to its original state
+	Reset() error
+	// Size returns the window size of the console
+	Size() (WinSize, error)
+	// Fd returns the console's file descriptor
+	Fd() uintptr
+	// Name returns the console's file name
+	Name() string
+}
+
+// WinSize specifies the window size of the console
+type WinSize struct {
+	// Height of the console
+	Height uint16
+	// Width of the console
+	Width uint16
+	x     uint16
+	y     uint16
+}
+
+// Current returns the current process's console
+func Current() Console {
+	c, err := ConsoleFromFile(os.Stdin)
+	if err != nil {
+		// stdin should always be a console for the design
+		// of this function
+		panic(err)
+	}
+	return c
+}
+
+// ConsoleFromFile returns a console using the provided file
+func ConsoleFromFile(f *os.File) (Console, error) {
+	if err := checkConsole(f); err != nil {
+		return nil, err
+	}
+	return newMaster(f)
+}
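+// Editorial sketch (not part of the vendored file): ConsoleFromFile rejects
+// files that are not attached to a TTY with ErrNotAConsole, which callers can
+// use to fall back to plain file I/O. Assumes stdin may be a pipe or redirect:
+//
+//	c, err := console.ConsoleFromFile(os.Stdin)
+//	if err == console.ErrNotAConsole {
+//		// stdin is redirected from a file or pipe; skip raw-mode handling
+//	} else if err == nil {
+//		c.SetRaw()
+//		defer c.Reset() // restore the original termios state on exit
+//	}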
diff --git a/vendor/github.com/containerd/console/console_linux.go b/vendor/github.com/containerd/console/console_linux.go
new file mode 100644
index 000000000000..c963729296d7
--- /dev/null
+++ b/vendor/github.com/containerd/console/console_linux.go
@@ -0,0 +1,255 @@
+// +build linux
+
+package console
+
+import (
+	"io"
+	"os"
+	"sync"
+
+	"golang.org/x/sys/unix"
+)
+
+const (
+	maxEvents = 128
+)
+
+// Epoller manages multiple epoll consoles using the edge-triggered epoll API
+// so we don't have to deal with repeated wake-ups on EPOLLERR or EPOLLHUP.
+// For more details, see:
+// - https://github.com/systemd/systemd/pull/4262
+// - https://github.com/moby/moby/issues/27202
+//
+// Example usage of Epoller and EpollConsole is as follows:
+//
+//	epoller, _ := NewEpoller()
+//	epollConsole, _ := epoller.Add(console)
+//	go epoller.Wait()
+//	var (
+//		b  bytes.Buffer
+//		wg sync.WaitGroup
+//	)
+//	wg.Add(1)
+//	go func() {
+//		io.Copy(&b, epollConsole)
+//		wg.Done()
+//	}()
+//	// perform I/O on the console
+//	epollConsole.Shutdown(epoller.CloseConsole)
+//	wg.Wait()
+//	epollConsole.Close()
type Epoller struct {
+	efd       int
+	mu        sync.Mutex
+	fdMapping map[int]*EpollConsole
+}
+
+// NewEpoller returns an instance of epoller with a valid epoll fd.
+func NewEpoller() (*Epoller, error) {
+	efd, err := unix.EpollCreate1(unix.EPOLL_CLOEXEC)
+	if err != nil {
+		return nil, err
+	}
+	return &Epoller{
+		efd:       efd,
+		fdMapping: make(map[int]*EpollConsole),
+	}, nil
+}
+
+// Add creates an epoll console based on the provided console. The console will
+// be registered with EPOLLET (i.e. using edge-triggered notification) and its
+// file descriptor will be set to non-blocking mode. After this, the user
+// should use the returned console to perform I/O.
+func (e *Epoller) Add(console Console) (*EpollConsole, error) {
+	sysfd := int(console.Fd())
+	// Set sysfd to non-blocking mode
+	if err := unix.SetNonblock(sysfd, true); err != nil {
+		return nil, err
+	}
+
+	ev := unix.EpollEvent{
+		Events: unix.EPOLLIN | unix.EPOLLOUT | unix.EPOLLRDHUP | unix.EPOLLET,
+		Fd:     int32(sysfd),
+	}
+	if err := unix.EpollCtl(e.efd, unix.EPOLL_CTL_ADD, sysfd, &ev); err != nil {
+		return nil, err
+	}
+	ef := &EpollConsole{
+		Console: console,
+		sysfd:   sysfd,
+		readc:   sync.NewCond(&sync.Mutex{}),
+		writec:  sync.NewCond(&sync.Mutex{}),
+	}
+	e.mu.Lock()
+	e.fdMapping[sysfd] = ef
+	e.mu.Unlock()
+	return ef, nil
+}
+
+// Wait starts the loop that waits for its consoles' notifications and signals
+// the appropriate console that it can perform I/O.
+func (e *Epoller) Wait() error {
+	events := make([]unix.EpollEvent, maxEvents)
+	for {
+		n, err := unix.EpollWait(e.efd, events, -1)
+		if err != nil {
+			// EINTR: The call was interrupted by a signal handler before either
+			// any of the requested events occurred or the timeout expired
+			if err == unix.EINTR {
+				continue
+			}
+			return err
+		}
+		for i := 0; i < n; i++ {
+			ev := &events[i]
+			// the console is ready to be read from
+			if ev.Events&(unix.EPOLLIN|unix.EPOLLHUP|unix.EPOLLERR) != 0 {
+				if epfile := e.getConsole(int(ev.Fd)); epfile != nil {
+					epfile.signalRead()
+				}
+			}
+			// the console is ready to be written to
+			if ev.Events&(unix.EPOLLOUT|unix.EPOLLHUP|unix.EPOLLERR) != 0 {
+				if epfile := e.getConsole(int(ev.Fd)); epfile != nil {
+					epfile.signalWrite()
+				}
+			}
+		}
+	}
+}
+
+// CloseConsole unregisters the console's file descriptor from the epoll
+// interface.
+func (e *Epoller) CloseConsole(fd int) error {
+	e.mu.Lock()
+	defer e.mu.Unlock()
+	delete(e.fdMapping, fd)
+	return unix.EpollCtl(e.efd, unix.EPOLL_CTL_DEL, fd, &unix.EpollEvent{})
+}
+
+func (e *Epoller) getConsole(sysfd int) *EpollConsole {
+	e.mu.Lock()
+	f := e.fdMapping[sysfd]
+	e.mu.Unlock()
+	return f
+}
+
+// Close closes the epoll fd.
+func (e *Epoller) Close() error {
+	return unix.Close(e.efd)
+}
+
+// EpollConsole acts like a console but registers its file descriptor with an
+// epoll fd and uses the epoll API to perform I/O.
+type EpollConsole struct {
+	Console
+	readc  *sync.Cond
+	writec *sync.Cond
+	sysfd  int
+	closed bool
+}
+
+// Read reads up to len(p) bytes into p. It returns the number of bytes read
+// (0 <= n <= len(p)) and any error encountered.
+//
+// If the console's read returns EAGAIN or EIO, we assume that it's a
+// temporary error because the other side went away, and wait for the signal
+// generated by an epoll event to continue.
+func (ec *EpollConsole) Read(p []byte) (n int, err error) {
+	var read int
+	ec.readc.L.Lock()
+	defer ec.readc.L.Unlock()
+	for {
+		read, err = ec.Console.Read(p[n:])
+		n += read
+		if err != nil {
+			var hangup bool
+			if perr, ok := err.(*os.PathError); ok {
+				hangup = (perr.Err == unix.EAGAIN || perr.Err == unix.EIO)
+			} else {
+				hangup = (err == unix.EAGAIN || err == unix.EIO)
+			}
+			// if the other end disappears, assume this is temporary and wait for
+			// the signal to continue again, unless we didn't read anything and
+			// the console is already marked as closed, in which case we should
+			// exit
+			if hangup && !(n == 0 && len(p) > 0 && ec.closed) {
+				ec.readc.Wait()
+				continue
+			}
+		}
+		break
+	}
+	// if we didn't read anything then return io.EOF to end gracefully
+	if n == 0 && len(p) > 0 && err == nil {
+		err = io.EOF
+	}
+	// signal others that we finished the read
+	ec.readc.Signal()
+	return n, err
+}
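+// Editorial sketch (not part of the vendored file): Read parks on the readc
+// condition variable whenever the non-blocking fd reports EAGAIN/EIO, so an
+// Epoller.Wait loop must be running concurrently to deliver the wake-up
+// signals; without it, a Read on an idle console blocks indefinitely. Here c
+// is assumed to be an existing Console:
+//
+//	epoller, _ := console.NewEpoller()
+//	ec, _ := epoller.Add(c)
+//	go epoller.Wait() // required: signals readers/writers on epoll events
+//	buf := make([]byte, 4096)
+//	n, err := ec.Read(buf)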
Unless we didnt read anything and the + // console is already marked as closed then we should exit + if hangup && !(n == 0 && len(p) > 0 && ec.closed) { + ec.readc.Wait() + continue + } + } + break + } + // if we didnt read anything then return io.EOF to end gracefully + if n == 0 && len(p) > 0 && err == nil { + err = io.EOF + } + // signal for others that we finished the read + ec.readc.Signal() + return n, err +} + +// Writes len(p) bytes from p to the console. It returns the number of bytes +// written from p (0 <= n <= len(p)) and any error encountered that caused +// the write to stop early. +// +// If writes to the console returns EAGAIN or EIO, we assumes that its a +// temporary error because the other side went away and wait for the signal +// generated by epoll event to continue. +func (ec *EpollConsole) Write(p []byte) (n int, err error) { + var written int + ec.writec.L.Lock() + defer ec.writec.L.Unlock() + for { + written, err = ec.Console.Write(p[n:]) + n += written + if err != nil { + var hangup bool + if perr, ok := err.(*os.PathError); ok { + hangup = (perr.Err == unix.EAGAIN || perr.Err == unix.EIO) + } else { + hangup = (err == unix.EAGAIN || err == unix.EIO) + } + // if the other end disappear, assume this is temporary and wait for the + // signal to continue again. + if hangup { + ec.writec.Wait() + continue + } + } + // unrecoverable error, break the loop and return the error + break + } + if n < len(p) && err == nil { + err = io.ErrShortWrite + } + // signal for others that we finished the write + ec.writec.Signal() + return n, err +} + +// Close closed the file descriptor and signal call waiters for this fd. +// It accepts a callback which will be called with the console's fd. The +// callback typically will be used to do further cleanup such as unregister the +// console's fd from the epoll interface. +// User should call Shutdown and wait for all I/O operation to be finished +// before closing the console. +func (ec *EpollConsole) Shutdown(close func(int) error) error { + ec.readc.L.Lock() + defer ec.readc.L.Unlock() + ec.writec.L.Lock() + defer ec.writec.L.Unlock() + + ec.readc.Broadcast() + ec.writec.Broadcast() + ec.closed = true + return close(ec.sysfd) +} + +// signalRead signals that the console is readable. +func (ec *EpollConsole) signalRead() { + ec.readc.Signal() +} + +// signalWrite signals that the console is writable. 
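
A minimal sketch of the package's TTY check, assuming the vendored import path `github.com/containerd/console`: `ConsoleFromFile` reports `ErrNotAConsole` when the file is not a terminal, which is a convenient guard before raw-mode setup.

```go
package main

import (
	"fmt"
	"os"

	"github.com/containerd/console"
)

func main() {
	// ConsoleFromFile fails with ErrNotAConsole when the file is not a
	// terminal, e.g. when stdout is redirected to a file or pipe.
	if _, err := console.ConsoleFromFile(os.Stdout); err == console.ErrNotAConsole {
		fmt.Fprintln(os.Stderr, "stdout is not a terminal; skipping raw-mode setup")
		return
	}
	fmt.Println("stdout is a terminal")
}
```
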
+func (ec *EpollConsole) signalWrite() { + ec.writec.Signal() +} diff --git a/vendor/github.com/containerd/console/console_unix.go b/vendor/github.com/containerd/console/console_unix.go new file mode 100644 index 000000000000..118c8c3abfd2 --- /dev/null +++ b/vendor/github.com/containerd/console/console_unix.go @@ -0,0 +1,142 @@ +// +build darwin freebsd linux solaris + +package console + +import ( + "os" + + "golang.org/x/sys/unix" +) + +// NewPty creates a new pty pair +// The master is returned as the first console and a string +// with the path to the pty slave is returned as the second +func NewPty() (Console, string, error) { + f, err := os.OpenFile("/dev/ptmx", unix.O_RDWR|unix.O_NOCTTY|unix.O_CLOEXEC, 0) + if err != nil { + return nil, "", err + } + slave, err := ptsname(f) + if err != nil { + return nil, "", err + } + if err := unlockpt(f); err != nil { + return nil, "", err + } + m, err := newMaster(f) + if err != nil { + return nil, "", err + } + return m, slave, nil +} + +type master struct { + f *os.File + original *unix.Termios +} + +func (m *master) Read(b []byte) (int, error) { + return m.f.Read(b) +} + +func (m *master) Write(b []byte) (int, error) { + return m.f.Write(b) +} + +func (m *master) Close() error { + return m.f.Close() +} + +func (m *master) Resize(ws WinSize) error { + return tcswinsz(m.f.Fd(), ws) +} + +func (m *master) ResizeFrom(c Console) error { + ws, err := c.Size() + if err != nil { + return err + } + return m.Resize(ws) +} + +func (m *master) Reset() error { + if m.original == nil { + return nil + } + return tcset(m.f.Fd(), m.original) +} + +func (m *master) getCurrent() (unix.Termios, error) { + var termios unix.Termios + if err := tcget(m.f.Fd(), &termios); err != nil { + return unix.Termios{}, err + } + return termios, nil +} + +func (m *master) SetRaw() error { + rawState, err := m.getCurrent() + if err != nil { + return err + } + rawState = cfmakeraw(rawState) + rawState.Oflag = rawState.Oflag | unix.OPOST + return tcset(m.f.Fd(), &rawState) +} + +func (m *master) DisableEcho() error { + rawState, err := m.getCurrent() + if err != nil { + return err + } + rawState.Lflag = rawState.Lflag &^ unix.ECHO + return tcset(m.f.Fd(), &rawState) +} + +func (m *master) Size() (WinSize, error) { + return tcgwinsz(m.f.Fd()) +} + +func (m *master) Fd() uintptr { + return m.f.Fd() +} + +func (m *master) Name() string { + return m.f.Name() +} + +// checkConsole checks if the provided file is a console +func checkConsole(f *os.File) error { + var termios unix.Termios + if tcget(f.Fd(), &termios) != nil { + return ErrNotAConsole + } + return nil +} + +func newMaster(f *os.File) (Console, error) { + m := &master{ + f: f, + } + t, err := m.getCurrent() + if err != nil { + return nil, err + } + m.original = &t + return m, nil +} + +// ClearONLCR sets the necessary tty_ioctl(4)s to ensure that a pty pair +// created by us acts normally. In particular, a not-very-well-known default of +// Linux unix98 ptys is that they have +onlcr by default. While this isn't a +// problem for terminal emulators, because we relay data from the terminal we +// also relay that funky line discipline. +func ClearONLCR(fd uintptr) error { + return setONLCR(fd, false) +} + +// SetONLCR sets the necessary tty_ioctl(4)s to ensure that a pty pair +// created by us acts as intended for a terminal emulator. 
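
A hedged sketch of the pty helpers above on a Unix host: allocate a pair with `NewPty`, open the slave path, clear `+onlcr` so relayed data is not rewritten, and give the console an initial size (24x80 is an arbitrary choice).

```go
package main

import (
	"os"

	"github.com/containerd/console"
)

func main() {
	// Allocate a pty pair; NewPty unlocks the slave before returning.
	master, slavePath, err := console.NewPty()
	if err != nil {
		panic(err)
	}
	defer master.Close()

	slave, err := os.OpenFile(slavePath, os.O_RDWR, 0)
	if err != nil {
		panic(err)
	}
	defer slave.Close()

	// Relay data verbatim: clear +onlcr on the slave side.
	if err := console.ClearONLCR(slave.Fd()); err != nil {
		panic(err)
	}
	// Arbitrary initial window size.
	if err := master.Resize(console.WinSize{Height: 24, Width: 80}); err != nil {
		panic(err)
	}
}
```
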
+func SetONLCR(fd uintptr) error { + return setONLCR(fd, true) +} diff --git a/vendor/github.com/containerd/console/console_windows.go b/vendor/github.com/containerd/console/console_windows.go new file mode 100644 index 000000000000..d78a0b8419be --- /dev/null +++ b/vendor/github.com/containerd/console/console_windows.go @@ -0,0 +1,200 @@ +package console + +import ( + "fmt" + "os" + + "github.com/pkg/errors" + "golang.org/x/sys/windows" +) + +var ( + vtInputSupported bool + ErrNotImplemented = errors.New("not implemented") +) + +func (m *master) initStdios() { + m.in = windows.Handle(os.Stdin.Fd()) + if err := windows.GetConsoleMode(m.in, &m.inMode); err == nil { + // Validate that windows.ENABLE_VIRTUAL_TERMINAL_INPUT is supported, but do not set it. + if err = windows.SetConsoleMode(m.in, m.inMode|windows.ENABLE_VIRTUAL_TERMINAL_INPUT); err == nil { + vtInputSupported = true + } + // Unconditionally set the console mode back even on failure because SetConsoleMode + // remembers invalid bits on input handles. + windows.SetConsoleMode(m.in, m.inMode) + } else { + fmt.Printf("failed to get console mode for stdin: %v\n", err) + } + + m.out = windows.Handle(os.Stdout.Fd()) + if err := windows.GetConsoleMode(m.out, &m.outMode); err == nil { + if err := windows.SetConsoleMode(m.out, m.outMode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING); err == nil { + m.outMode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING + } else { + windows.SetConsoleMode(m.out, m.outMode) + } + } else { + fmt.Printf("failed to get console mode for stdout: %v\n", err) + } + + m.err = windows.Handle(os.Stderr.Fd()) + if err := windows.GetConsoleMode(m.err, &m.errMode); err == nil { + if err := windows.SetConsoleMode(m.err, m.errMode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING); err == nil { + m.errMode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING + } else { + windows.SetConsoleMode(m.err, m.errMode) + } + } else { + fmt.Printf("failed to get console mode for stderr: %v\n", err) + } +} + +type master struct { + in windows.Handle + inMode uint32 + + out windows.Handle + outMode uint32 + + err windows.Handle + errMode uint32 +} + +func (m *master) SetRaw() error { + if err := makeInputRaw(m.in, m.inMode); err != nil { + return err + } + + // Set StdOut and StdErr to raw mode, we ignore failures since + // windows.DISABLE_NEWLINE_AUTO_RETURN might not be supported on this version of + // Windows. 
+ + windows.SetConsoleMode(m.out, m.outMode|windows.DISABLE_NEWLINE_AUTO_RETURN) + + windows.SetConsoleMode(m.err, m.errMode|windows.DISABLE_NEWLINE_AUTO_RETURN) + + return nil +} + +func (m *master) Reset() error { + for _, s := range []struct { + fd windows.Handle + mode uint32 + }{ + {m.in, m.inMode}, + {m.out, m.outMode}, + {m.err, m.errMode}, + } { + if err := windows.SetConsoleMode(s.fd, s.mode); err != nil { + return errors.Wrap(err, "unable to restore console mode") + } + } + + return nil +} + +func (m *master) Size() (WinSize, error) { + var info windows.ConsoleScreenBufferInfo + err := windows.GetConsoleScreenBufferInfo(m.out, &info) + if err != nil { + return WinSize{}, errors.Wrap(err, "unable to get console info") + } + + winsize := WinSize{ + Width: uint16(info.Window.Right - info.Window.Left + 1), + Height: uint16(info.Window.Bottom - info.Window.Top + 1), + } + + return winsize, nil +} + +func (m *master) Resize(ws WinSize) error { + return ErrNotImplemented +} + +func (m *master) ResizeFrom(c Console) error { + return ErrNotImplemented +} + +func (m *master) DisableEcho() error { + mode := m.inMode &^ windows.ENABLE_ECHO_INPUT + mode |= windows.ENABLE_PROCESSED_INPUT + mode |= windows.ENABLE_LINE_INPUT + + if err := windows.SetConsoleMode(m.in, mode); err != nil { + return errors.Wrap(err, "unable to set console to disable echo") + } + + return nil +} + +func (m *master) Close() error { + return nil +} + +func (m *master) Read(b []byte) (int, error) { + panic("not implemented on windows") +} + +func (m *master) Write(b []byte) (int, error) { + panic("not implemented on windows") +} + +func (m *master) Fd() uintptr { + return uintptr(m.in) +} + +// on windows, console can only be made from os.Std{in,out,err}, hence there +// isnt a single name here we can use. Return a dummy "console" value in this +// case should be sufficient. 
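
The mode handling above is what makes `DisableEcho` usable on both Windows and Unix; a hedged sketch of the familiar password-prompt pattern built on it (reading from `os.Stdin` directly, since the Windows console does not implement `Read`):

```go
package main

import (
	"bufio"
	"fmt"
	"os"

	"github.com/containerd/console"
)

func main() {
	c := console.Current() // panics if stdin is not a terminal
	defer c.Reset()        // restore the mode saved when the console was created

	fmt.Print("password: ")
	if err := c.DisableEcho(); err != nil {
		panic(err)
	}
	secret, err := bufio.NewReader(os.Stdin).ReadString('\n')
	if err != nil {
		panic(err)
	}
	fmt.Printf("\nread %d bytes\n", len(secret))
}
```
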
+func (m *master) Name() string { + return "console" +} + +// makeInputRaw puts the terminal (Windows Console) connected to the given +// file descriptor into raw mode +func makeInputRaw(fd windows.Handle, mode uint32) error { + // See + // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx + // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx + + // Disable these modes + mode &^= windows.ENABLE_ECHO_INPUT + mode &^= windows.ENABLE_LINE_INPUT + mode &^= windows.ENABLE_MOUSE_INPUT + mode &^= windows.ENABLE_WINDOW_INPUT + mode &^= windows.ENABLE_PROCESSED_INPUT + + // Enable these modes + mode |= windows.ENABLE_EXTENDED_FLAGS + mode |= windows.ENABLE_INSERT_MODE + mode |= windows.ENABLE_QUICK_EDIT_MODE + + if vtInputSupported { + mode |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT + } + + if err := windows.SetConsoleMode(fd, mode); err != nil { + return errors.Wrap(err, "unable to set console to raw mode") + } + + return nil +} + +func checkConsole(f *os.File) error { + var mode uint32 + if err := windows.GetConsoleMode(windows.Handle(f.Fd()), &mode); err != nil { + return err + } + return nil +} + +func newMaster(f *os.File) (Console, error) { + if f != os.Stdin && f != os.Stdout && f != os.Stderr { + return nil, errors.New("creating a console from a file is not supported on windows") + } + m := &master{} + m.initStdios() + return m, nil +} diff --git a/vendor/github.com/containerd/console/tc_darwin.go b/vendor/github.com/containerd/console/tc_darwin.go new file mode 100644 index 000000000000..b102bad743a0 --- /dev/null +++ b/vendor/github.com/containerd/console/tc_darwin.go @@ -0,0 +1,37 @@ +package console + +import ( + "fmt" + "os" + "unsafe" + + "golang.org/x/sys/unix" +) + +const ( + cmdTcGet = unix.TIOCGETA + cmdTcSet = unix.TIOCSETA +) + +func ioctl(fd, flag, data uintptr) error { + if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, flag, data); err != 0 { + return err + } + return nil +} + +// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. +// unlockpt should be called before opening the slave side of a pty. +func unlockpt(f *os.File) error { + var u int32 + return ioctl(f.Fd(), unix.TIOCPTYUNLK, uintptr(unsafe.Pointer(&u))) +} + +// ptsname retrieves the name of the first available pts for the given master. +func ptsname(f *os.File) (string, error) { + n, err := unix.IoctlGetInt(int(f.Fd()), unix.TIOCPTYGNAME) + if err != nil { + return "", err + } + return fmt.Sprintf("/dev/pts/%d", n), nil +} diff --git a/vendor/github.com/containerd/console/tc_freebsd.go b/vendor/github.com/containerd/console/tc_freebsd.go new file mode 100644 index 000000000000..e2a10e4413c3 --- /dev/null +++ b/vendor/github.com/containerd/console/tc_freebsd.go @@ -0,0 +1,29 @@ +package console + +import ( + "fmt" + "os" + + "golang.org/x/sys/unix" +) + +const ( + cmdTcGet = unix.TIOCGETA + cmdTcSet = unix.TIOCSETA +) + +// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. +// unlockpt should be called before opening the slave side of a pty. +// This does not exist on FreeBSD, it does not allocate controlling terminals on open +func unlockpt(f *os.File) error { + return nil +} + +// ptsname retrieves the name of the first available pts for the given master. 
+func ptsname(f *os.File) (string, error) { + n, err := unix.IoctlGetInt(int(f.Fd()), unix.TIOCGPTN) + if err != nil { + return "", err + } + return fmt.Sprintf("/dev/pts/%d", n), nil +} diff --git a/vendor/github.com/containerd/console/tc_linux.go b/vendor/github.com/containerd/console/tc_linux.go new file mode 100644 index 000000000000..80ef2f6fb39f --- /dev/null +++ b/vendor/github.com/containerd/console/tc_linux.go @@ -0,0 +1,37 @@ +package console + +import ( + "fmt" + "os" + "unsafe" + + "golang.org/x/sys/unix" +) + +const ( + cmdTcGet = unix.TCGETS + cmdTcSet = unix.TCSETS +) + +func ioctl(fd, flag, data uintptr) error { + if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, flag, data); err != 0 { + return err + } + return nil +} + +// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. +// unlockpt should be called before opening the slave side of a pty. +func unlockpt(f *os.File) error { + var u int32 + return ioctl(f.Fd(), unix.TIOCSPTLCK, uintptr(unsafe.Pointer(&u))) +} + +// ptsname retrieves the name of the first available pts for the given master. +func ptsname(f *os.File) (string, error) { + n, err := unix.IoctlGetInt(int(f.Fd()), unix.TIOCGPTN) + if err != nil { + return "", err + } + return fmt.Sprintf("/dev/pts/%d", n), nil +} diff --git a/vendor/github.com/containerd/console/tc_solaris_cgo.go b/vendor/github.com/containerd/console/tc_solaris_cgo.go new file mode 100644 index 000000000000..f8066d8e3982 --- /dev/null +++ b/vendor/github.com/containerd/console/tc_solaris_cgo.go @@ -0,0 +1,35 @@ +// +build solaris,cgo + +package console + +import ( + "os" + + "golang.org/x/sys/unix" +) + +//#include +import "C" + +const ( + cmdTcGet = unix.TCGETS + cmdTcSet = unix.TCSETS +) + +// ptsname retrieves the name of the first available pts for the given master. +func ptsname(f *os.File) (string, error) { + ptspath, err := C.ptsname(C.int(f.Fd())) + if err != nil { + return "", err + } + return C.GoString(ptspath), nil +} + +// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. +// unlockpt should be called before opening the slave side of a pty. +func unlockpt(f *os.File) error { + if _, err := C.grantpt(C.int(f.Fd())); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/containerd/console/tc_solaris_nocgo.go b/vendor/github.com/containerd/console/tc_solaris_nocgo.go new file mode 100644 index 000000000000..0aefa0d2bb12 --- /dev/null +++ b/vendor/github.com/containerd/console/tc_solaris_nocgo.go @@ -0,0 +1,31 @@ +// +build solaris,!cgo + +// +// Implementing the functions below requires cgo support. Non-cgo stubs +// versions are defined below to enable cross-compilation of source code +// that depends on these functions, but the resultant cross-compiled +// binaries cannot actually be used. If the stub function(s) below are +// actually invoked they will display an error message and cause the +// calling process to exit. 
+// + +package console + +import ( + "os" + + "golang.org/x/sys/unix" +) + +const ( + cmdTcGet = unix.TCGETS + cmdTcSet = unix.TCSETS +) + +func ptsname(f *os.File) (string, error) { + panic("ptsname() support requires cgo.") +} + +func unlockpt(f *os.File) error { + panic("unlockpt() support requires cgo.") +} diff --git a/vendor/github.com/containerd/console/tc_unix.go b/vendor/github.com/containerd/console/tc_unix.go new file mode 100644 index 000000000000..df7dcb933420 --- /dev/null +++ b/vendor/github.com/containerd/console/tc_unix.go @@ -0,0 +1,75 @@ +// +build darwin freebsd linux solaris + +package console + +import ( + "golang.org/x/sys/unix" +) + +func tcget(fd uintptr, p *unix.Termios) error { + termios, err := unix.IoctlGetTermios(int(fd), cmdTcGet) + if err != nil { + return err + } + *p = *termios + return nil +} + +func tcset(fd uintptr, p *unix.Termios) error { + return unix.IoctlSetTermios(int(fd), cmdTcSet, p) +} + +func tcgwinsz(fd uintptr) (WinSize, error) { + var ws WinSize + + uws, err := unix.IoctlGetWinsize(int(fd), unix.TIOCGWINSZ) + if err != nil { + return ws, err + } + + // Translate from unix.Winsize to console.WinSize + ws.Height = uws.Row + ws.Width = uws.Col + ws.x = uws.Xpixel + ws.y = uws.Ypixel + return ws, nil +} + +func tcswinsz(fd uintptr, ws WinSize) error { + // Translate from console.WinSize to unix.Winsize + + var uws unix.Winsize + uws.Row = ws.Height + uws.Col = ws.Width + uws.Xpixel = ws.x + uws.Ypixel = ws.y + + return unix.IoctlSetWinsize(int(fd), unix.TIOCSWINSZ, &uws) +} + +func setONLCR(fd uintptr, enable bool) error { + var termios unix.Termios + if err := tcget(fd, &termios); err != nil { + return err + } + if enable { + // Set +onlcr so we can act like a real terminal + termios.Oflag |= unix.ONLCR + } else { + // Set -onlcr so we don't have to deal with \r. + termios.Oflag &^= unix.ONLCR + } + return tcset(fd, &termios) +} + +func cfmakeraw(t unix.Termios) unix.Termios { + t.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON) + t.Oflag &^= unix.OPOST + t.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN) + t.Cflag &^= (unix.CSIZE | unix.PARENB) + t.Cflag &^= unix.CS8 + t.Cc[unix.VMIN] = 1 + t.Cc[unix.VTIME] = 0 + + return t +} diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go new file mode 100644 index 000000000000..a129e654ea26 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go @@ -0,0 +1,298 @@ +package fileutils + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "regexp" + "strings" + "text/scanner" + + "github.com/sirupsen/logrus" +) + +// PatternMatcher allows checking paths agaist a list of patterns +type PatternMatcher struct { + patterns []*Pattern + exclusions bool +} + +// NewPatternMatcher creates a new matcher object for specific patterns that can +// be used later to match against patterns against paths +func NewPatternMatcher(patterns []string) (*PatternMatcher, error) { + pm := &PatternMatcher{ + patterns: make([]*Pattern, 0, len(patterns)), + } + for _, p := range patterns { + // Eliminate leading and trailing whitespace. + p = strings.TrimSpace(p) + if p == "" { + continue + } + p = filepath.Clean(p) + newp := &Pattern{} + if p[0] == '!' 
{ + if len(p) == 1 { + return nil, errors.New("illegal exclusion pattern: \"!\"") + } + newp.exclusion = true + p = p[1:] + pm.exclusions = true + } + // Do some syntax checking on the pattern. + // filepath's Match() has some really weird rules that are inconsistent + // so instead of trying to dup their logic, just call Match() for its + // error state and if there is an error in the pattern return it. + // If this becomes an issue we can remove this since its really only + // needed in the error (syntax) case - which isn't really critical. + if _, err := filepath.Match(p, "."); err != nil { + return nil, err + } + newp.cleanedPattern = p + newp.dirs = strings.Split(p, string(os.PathSeparator)) + pm.patterns = append(pm.patterns, newp) + } + return pm, nil +} + +// Matches matches path against all the patterns. Matches is not safe to be +// called concurrently +func (pm *PatternMatcher) Matches(file string) (bool, error) { + matched := false + file = filepath.FromSlash(file) + parentPath := filepath.Dir(file) + parentPathDirs := strings.Split(parentPath, string(os.PathSeparator)) + + for _, pattern := range pm.patterns { + negative := false + + if pattern.exclusion { + negative = true + } + + match, err := pattern.match(file) + if err != nil { + return false, err + } + + if !match && parentPath != "." { + // Check to see if the pattern matches one of our parent dirs. + if len(pattern.dirs) <= len(parentPathDirs) { + match, _ = pattern.match(strings.Join(parentPathDirs[:len(pattern.dirs)], string(os.PathSeparator))) + } + } + + if match { + matched = !negative + } + } + + if matched { + logrus.Debugf("Skipping excluded path: %s", file) + } + + return matched, nil +} + +// Exclusions returns true if any of the patterns define exclusions +func (pm *PatternMatcher) Exclusions() bool { + return pm.exclusions +} + +// Patterns returns array of active patterns +func (pm *PatternMatcher) Patterns() []*Pattern { + return pm.patterns +} + +// Pattern defines a single regexp used used to filter file paths. +type Pattern struct { + cleanedPattern string + dirs []string + regexp *regexp.Regexp + exclusion bool +} + +func (p *Pattern) String() string { + return p.cleanedPattern +} + +// Exclusion returns true if this pattern defines exclusion +func (p *Pattern) Exclusion() bool { + return p.exclusion +} + +func (p *Pattern) match(path string) (bool, error) { + + if p.regexp == nil { + if err := p.compile(); err != nil { + return false, filepath.ErrBadPattern + } + } + + b := p.regexp.MatchString(path) + + return b, nil +} + +func (p *Pattern) compile() error { + regStr := "^" + pattern := p.cleanedPattern + // Go through the pattern and convert it to a regexp. + // We use a scanner so we can support utf-8 chars. + var scan scanner.Scanner + scan.Init(strings.NewReader(pattern)) + + sl := string(os.PathSeparator) + escSL := sl + if sl == `\` { + escSL += `\` + } + + for scan.Peek() != scanner.EOF { + ch := scan.Next() + + if ch == '*' { + if scan.Peek() == '*' { + // is some flavor of "**" + scan.Next() + + // Treat **/ as ** so eat the "/" + if string(scan.Peek()) == sl { + scan.Next() + } + + if scan.Peek() == scanner.EOF { + // is "**EOF" - to align with .gitignore just accept all + regStr += ".*" + } else { + // is "**" + // Note that this allows for any # of /'s (even 0) because + // the .* will eat everything, even /'s + regStr += "(.*" + escSL + ")?" + } + } else { + // is "*" so map it to anything but "/" + regStr += "[^" + escSL + "]*" + } + } else if ch == '?' { + // "?" 
is any char except "/" + regStr += "[^" + escSL + "]" + } else if ch == '.' || ch == '$' { + // Escape some regexp special chars that have no meaning + // in golang's filepath.Match + regStr += `\` + string(ch) + } else if ch == '\\' { + // escape next char. Note that a trailing \ in the pattern + // will be left alone (but need to escape it) + if sl == `\` { + // On windows map "\" to "\\", meaning an escaped backslash, + // and then just continue because filepath.Match on + // Windows doesn't allow escaping at all + regStr += escSL + continue + } + if scan.Peek() != scanner.EOF { + regStr += `\` + string(scan.Next()) + } else { + regStr += `\` + } + } else { + regStr += string(ch) + } + } + + regStr += "$" + + re, err := regexp.Compile(regStr) + if err != nil { + return err + } + + p.regexp = re + return nil +} + +// Matches returns true if file matches any of the patterns +// and isn't excluded by any of the subsequent patterns. +func Matches(file string, patterns []string) (bool, error) { + pm, err := NewPatternMatcher(patterns) + if err != nil { + return false, err + } + file = filepath.Clean(file) + + if file == "." { + // Don't let them exclude everything, kind of silly. + return false, nil + } + + return pm.Matches(file) +} + +// CopyFile copies from src to dst until either EOF is reached +// on src or an error occurs. It verifies src exists and removes +// the dst if it exists. +func CopyFile(src, dst string) (int64, error) { + cleanSrc := filepath.Clean(src) + cleanDst := filepath.Clean(dst) + if cleanSrc == cleanDst { + return 0, nil + } + sf, err := os.Open(cleanSrc) + if err != nil { + return 0, err + } + defer sf.Close() + if err := os.Remove(cleanDst); err != nil && !os.IsNotExist(err) { + return 0, err + } + df, err := os.Create(cleanDst) + if err != nil { + return 0, err + } + defer df.Close() + return io.Copy(df, sf) +} + +// ReadSymlinkedDirectory returns the target directory of a symlink. +// The target of the symbolic link may not be a file. +func ReadSymlinkedDirectory(path string) (string, error) { + var realPath string + var err error + if realPath, err = filepath.Abs(path); err != nil { + return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err) + } + if realPath, err = filepath.EvalSymlinks(realPath); err != nil { + return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err) + } + realPathInfo, err := os.Stat(realPath) + if err != nil { + return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err) + } + if !realPathInfo.Mode().IsDir() { + return "", fmt.Errorf("canonical path points to a file '%s'", realPath) + } + return realPath, nil +} + +// CreateIfNotExists creates a file or a directory only if it does not already exist. 
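
A short sketch of the matcher semantics defined above, assuming the vendored import path `github.com/containers/storage/pkg/fileutils`: patterns are applied in order, later matches win, and a leading `!` re-includes a path (the paths here are illustrative).

```go
package main

import (
	"fmt"

	"github.com/containers/storage/pkg/fileutils"
)

func main() {
	// Later patterns win; a leading "!" negates an earlier match.
	pm, err := fileutils.NewPatternMatcher([]string{"docs/**", "!docs/README.md"})
	if err != nil {
		panic(err)
	}
	skip, _ := pm.Matches("docs/api/index.md") // true: matched by docs/**
	keep, _ := pm.Matches("docs/README.md")    // false: re-included by the ! pattern
	fmt.Println(skip, keep)
}
```
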
+func CreateIfNotExists(path string, isDir bool) error { + if _, err := os.Stat(path); err != nil { + if os.IsNotExist(err) { + if isDir { + return os.MkdirAll(path, 0755) + } + if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + return err + } + f, err := os.OpenFile(path, os.O_CREATE, 0755) + if err != nil { + return err + } + f.Close() + } + } + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_darwin.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_darwin.go new file mode 100644 index 000000000000..ccd648fac300 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_darwin.go @@ -0,0 +1,27 @@ +package fileutils + +import ( + "os" + "os/exec" + "strconv" + "strings" +) + +// GetTotalUsedFds returns the number of used File Descriptors by +// executing `lsof -p PID` +func GetTotalUsedFds() int { + pid := os.Getpid() + + cmd := exec.Command("lsof", "-p", strconv.Itoa(pid)) + + output, err := cmd.CombinedOutput() + if err != nil { + return -1 + } + + outputStr := strings.TrimSpace(string(output)) + + fds := strings.Split(outputStr, "\n") + + return len(fds) - 1 +} diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_solaris.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_solaris.go new file mode 100644 index 000000000000..0f2cb7ab9333 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_solaris.go @@ -0,0 +1,7 @@ +package fileutils + +// GetTotalUsedFds Returns the number of used File Descriptors. +// On Solaris these limits are per process and not systemwide +func GetTotalUsedFds() int { + return -1 +} diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go new file mode 100644 index 000000000000..9e0e97bd64ac --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go @@ -0,0 +1,22 @@ +// +build linux freebsd + +package fileutils + +import ( + "fmt" + "io/ioutil" + "os" + + "github.com/sirupsen/logrus" +) + +// GetTotalUsedFds Returns the number of used File Descriptors by +// reading it via /proc filesystem. +func GetTotalUsedFds() int { + if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil { + logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err) + } else { + return len(fds) + } + return -1 +} diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_windows.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_windows.go new file mode 100644 index 000000000000..5ec21cace526 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_windows.go @@ -0,0 +1,7 @@ +package fileutils + +// GetTotalUsedFds Returns the number of used File Descriptors. Not supported +// on Windows. 
+func GetTotalUsedFds() int { + return -1 +} diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir.go b/vendor/github.com/containers/storage/pkg/homedir/homedir.go deleted file mode 100644 index 8154e83f0c9d..000000000000 --- a/vendor/github.com/containers/storage/pkg/homedir/homedir.go +++ /dev/null @@ -1,39 +0,0 @@ -package homedir - -import ( - "os" - "runtime" - - "github.com/opencontainers/runc/libcontainer/user" -) - -// Key returns the env var name for the user's home dir based on -// the platform being run on -func Key() string { - if runtime.GOOS == "windows" { - return "USERPROFILE" - } - return "HOME" -} - -// Get returns the home directory of the current user with the help of -// environment variables depending on the target operating system. -// Returned path should be used with "path/filepath" to form new paths. -func Get() string { - home := os.Getenv(Key()) - if home == "" && runtime.GOOS != "windows" { - if u, err := user.CurrentUser(); err == nil { - return u.Home - } - } - return home -} - -// GetShortcutString returns the string that is shortcut to user's home directory -// in the native shell of the platform running on. -func GetShortcutString() string { - if runtime.GOOS == "windows" { - return "%USERPROFILE%" // be careful while using in format functions - } - return "~" -} diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_linux.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_linux.go new file mode 100644 index 000000000000..c001fbecbfb2 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_linux.go @@ -0,0 +1,23 @@ +// +build linux + +package homedir + +import ( + "os" + + "github.com/containers/storage/pkg/idtools" +) + +// GetStatic returns the home directory for the current user without calling +// os/user.Current(). This is useful for static-linked binary on glibc-based +// system, because a call to os/user.Current() in a static binary leads to +// segfault due to a glibc issue that won't be fixed in a short term. +// (#29344, golang/go#13470, https://sourceware.org/bugzilla/show_bug.cgi?id=19341) +func GetStatic() (string, error) { + uid := os.Getuid() + usr, err := idtools.LookupUID(uid) + if err != nil { + return "", err + } + return usr.Home, nil +} diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go new file mode 100644 index 000000000000..6b96b856f67b --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go @@ -0,0 +1,13 @@ +// +build !linux + +package homedir + +import ( + "errors" +) + +// GetStatic is not needed for non-linux systems. +// (Precisely, it is needed only for glibc-based linux systems.) 
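
A hedged sketch of how a caller typically combines these lookups: prefer the environment via `Get`, and fall back to `GetStatic` for static glibc builds, where the fallback only succeeds on Linux as described above.

```go
package main

import (
	"fmt"

	"github.com/containers/storage/pkg/homedir"
)

func main() {
	home := homedir.Get()
	if home == "" {
		// Static glibc builds can't safely call os/user; fall back to
		// the /etc/passwd-based lookup, which only works on Linux.
		if h, err := homedir.GetStatic(); err == nil {
			home = h
		}
	}
	fmt.Println("home:", home)
}
```
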
+func GetStatic() (string, error) { + return "", errors.New("homedir.GetStatic() is not supported on this system") +} diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go new file mode 100644 index 000000000000..f2a20ea8f828 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go @@ -0,0 +1,34 @@ +// +build !windows + +package homedir + +import ( + "os" + + "github.com/opencontainers/runc/libcontainer/user" +) + +// Key returns the env var name for the user's home dir based on +// the platform being run on +func Key() string { + return "HOME" +} + +// Get returns the home directory of the current user with the help of +// environment variables depending on the target operating system. +// Returned path should be used with "path/filepath" to form new paths. +func Get() string { + home := os.Getenv(Key()) + if home == "" { + if u, err := user.CurrentUser(); err == nil { + return u.Home + } + } + return home +} + +// GetShortcutString returns the string that is shortcut to user's home directory +// in the native shell of the platform running on. +func GetShortcutString() string { + return "~" +} diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go new file mode 100644 index 000000000000..fafdb2bbf932 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go @@ -0,0 +1,24 @@ +package homedir + +import ( + "os" +) + +// Key returns the env var name for the user's home dir based on +// the platform being run on +func Key() string { + return "USERPROFILE" +} + +// Get returns the home directory of the current user with the help of +// environment variables depending on the target operating system. +// Returned path should be used with "path/filepath" to form new paths. +func Get() string { + return os.Getenv(Key()) +} + +// GetShortcutString returns the string that is shortcut to user's home directory +// in the native shell of the platform running on. +func GetShortcutString() string { + return "%USERPROFILE%" // be careful while using in format functions +} diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools.go b/vendor/github.com/containers/storage/pkg/idtools/idtools.go new file mode 100644 index 000000000000..68a072db2206 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools.go @@ -0,0 +1,279 @@ +package idtools + +import ( + "bufio" + "fmt" + "os" + "sort" + "strconv" + "strings" +) + +// IDMap contains a single entry for user namespace range remapping. An array +// of IDMap entries represents the structure that will be provided to the Linux +// kernel for creating a user namespace. +type IDMap struct { + ContainerID int `json:"container_id"` + HostID int `json:"host_id"` + Size int `json:"size"` +} + +type subIDRange struct { + Start int + Length int +} + +type ranges []subIDRange + +func (e ranges) Len() int { return len(e) } +func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] } +func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start } + +const ( + subuidFileName string = "/etc/subuid" + subgidFileName string = "/etc/subgid" +) + +// MkdirAllAs creates a directory (include any along the path) and then modifies +// ownership to the requested uid/gid. If the directory already exists, this +// function will still change ownership to the requested uid/gid pair. 
+// Deprecated: Use MkdirAllAndChown +func MkdirAllAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { + return mkdirAs(path, mode, ownerUID, ownerGID, true, true) +} + +// MkdirAs creates a directory and then modifies ownership to the requested uid/gid. +// If the directory already exists, this function still changes ownership +// Deprecated: Use MkdirAndChown with a IDPair +func MkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { + return mkdirAs(path, mode, ownerUID, ownerGID, false, true) +} + +// MkdirAllAndChown creates a directory (include any along the path) and then modifies +// ownership to the requested uid/gid. If the directory already exists, this +// function will still change ownership to the requested uid/gid pair. +func MkdirAllAndChown(path string, mode os.FileMode, ids IDPair) error { + return mkdirAs(path, mode, ids.UID, ids.GID, true, true) +} + +// MkdirAndChown creates a directory and then modifies ownership to the requested uid/gid. +// If the directory already exists, this function still changes ownership +func MkdirAndChown(path string, mode os.FileMode, ids IDPair) error { + return mkdirAs(path, mode, ids.UID, ids.GID, false, true) +} + +// MkdirAllAndChownNew creates a directory (include any along the path) and then modifies +// ownership ONLY of newly created directories to the requested uid/gid. If the +// directories along the path exist, no change of ownership will be performed +func MkdirAllAndChownNew(path string, mode os.FileMode, ids IDPair) error { + return mkdirAs(path, mode, ids.UID, ids.GID, true, false) +} + +// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps. +// If the maps are empty, then the root uid/gid will default to "real" 0/0 +func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { + uid, err := toHost(0, uidMap) + if err != nil { + return -1, -1, err + } + gid, err := toHost(0, gidMap) + if err != nil { + return -1, -1, err + } + return uid, gid, nil +} + +// toContainer takes an id mapping, and uses it to translate a +// host ID to the remapped ID. If no map is provided, then the translation +// assumes a 1-to-1 mapping and returns the passed in id +func toContainer(hostID int, idMap []IDMap) (int, error) { + if idMap == nil { + return hostID, nil + } + for _, m := range idMap { + if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) { + contID := m.ContainerID + (hostID - m.HostID) + return contID, nil + } + } + return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID) +} + +// toHost takes an id mapping and a remapped ID, and translates the +// ID to the mapped host ID. 
If no map is provided, then the translation +// assumes a 1-to-1 mapping and returns the passed in id # +func toHost(contID int, idMap []IDMap) (int, error) { + if idMap == nil { + return contID, nil + } + for _, m := range idMap { + if (contID >= m.ContainerID) && (contID <= (m.ContainerID + m.Size - 1)) { + hostID := m.HostID + (contID - m.ContainerID) + return hostID, nil + } + } + return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID) +} + +// IDPair is a UID and GID pair +type IDPair struct { + UID int + GID int +} + +// IDMappings contains a mappings of UIDs and GIDs +type IDMappings struct { + uids []IDMap + gids []IDMap +} + +// NewIDMappings takes a requested user and group name and +// using the data from /etc/sub{uid,gid} ranges, creates the +// proper uid and gid remapping ranges for that user/group pair +func NewIDMappings(username, groupname string) (*IDMappings, error) { + subuidRanges, err := parseSubuid(username) + if err != nil { + return nil, err + } + subgidRanges, err := parseSubgid(groupname) + if err != nil { + return nil, err + } + if len(subuidRanges) == 0 { + return nil, fmt.Errorf("No subuid ranges found for user %q", username) + } + if len(subgidRanges) == 0 { + return nil, fmt.Errorf("No subgid ranges found for group %q", groupname) + } + + return &IDMappings{ + uids: createIDMap(subuidRanges), + gids: createIDMap(subgidRanges), + }, nil +} + +// NewIDMappingsFromMaps creates a new mapping from two slices +// Deprecated: this is a temporary shim while transitioning to IDMapping +func NewIDMappingsFromMaps(uids []IDMap, gids []IDMap) *IDMappings { + return &IDMappings{uids: uids, gids: gids} +} + +// RootPair returns a uid and gid pair for the root user. The error is ignored +// because a root user always exists, and the defaults are correct when the uid +// and gid maps are empty. +func (i *IDMappings) RootPair() IDPair { + uid, gid, _ := GetRootUIDGID(i.uids, i.gids) + return IDPair{UID: uid, GID: gid} +} + +// ToHost returns the host UID and GID for the container uid, gid. 
+// Remapping is only performed if the ids aren't already the remapped root ids +func (i *IDMappings) ToHost(pair IDPair) (IDPair, error) { + var err error + target := i.RootPair() + + if pair.UID != target.UID { + target.UID, err = toHost(pair.UID, i.uids) + if err != nil { + return target, err + } + } + + if pair.GID != target.GID { + target.GID, err = toHost(pair.GID, i.gids) + } + return target, err +} + +// ToContainer returns the container UID and GID for the host uid and gid +func (i *IDMappings) ToContainer(pair IDPair) (int, int, error) { + uid, err := toContainer(pair.UID, i.uids) + if err != nil { + return -1, -1, err + } + gid, err := toContainer(pair.GID, i.gids) + return uid, gid, err +} + +// Empty returns true if there are no id mappings +func (i *IDMappings) Empty() bool { + return len(i.uids) == 0 && len(i.gids) == 0 +} + +// UIDs return the UID mapping +// TODO: remove this once everything has been refactored to use pairs +func (i *IDMappings) UIDs() []IDMap { + return i.uids +} + +// GIDs return the UID mapping +// TODO: remove this once everything has been refactored to use pairs +func (i *IDMappings) GIDs() []IDMap { + return i.gids +} + +func createIDMap(subidRanges ranges) []IDMap { + idMap := []IDMap{} + + // sort the ranges by lowest ID first + sort.Sort(subidRanges) + containerID := 0 + for _, idrange := range subidRanges { + idMap = append(idMap, IDMap{ + ContainerID: containerID, + HostID: idrange.Start, + Size: idrange.Length, + }) + containerID = containerID + idrange.Length + } + return idMap +} + +func parseSubuid(username string) (ranges, error) { + return parseSubidFile(subuidFileName, username) +} + +func parseSubgid(username string) (ranges, error) { + return parseSubidFile(subgidFileName, username) +} + +// parseSubidFile will read the appropriate file (/etc/subuid or /etc/subgid) +// and return all found ranges for a specified username. 
If the special value +// "ALL" is supplied for username, then all ranges in the file will be returned +func parseSubidFile(path, username string) (ranges, error) { + var rangeList ranges + + subidFile, err := os.Open(path) + if err != nil { + return rangeList, err + } + defer subidFile.Close() + + s := bufio.NewScanner(subidFile) + for s.Scan() { + if err := s.Err(); err != nil { + return rangeList, err + } + + text := strings.TrimSpace(s.Text()) + if text == "" || strings.HasPrefix(text, "#") { + continue + } + parts := strings.Split(text, ":") + if len(parts) != 3 { + return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path) + } + if parts[0] == username || username == "ALL" { + startid, err := strconv.Atoi(parts[1]) + if err != nil { + return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) + } + length, err := strconv.Atoi(parts[2]) + if err != nil { + return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) + } + rangeList = append(rangeList, subIDRange{startid, length}) + } + } + return rangeList, nil +} diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go new file mode 100644 index 000000000000..b5870506a080 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go @@ -0,0 +1,204 @@ +// +build !windows + +package idtools + +import ( + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "sync" + + "github.com/containers/storage/pkg/system" + "github.com/opencontainers/runc/libcontainer/user" +) + +var ( + entOnce sync.Once + getentCmd string +) + +func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { + // make an array containing the original path asked for, plus (for mkAll == true) + // all path components leading up to the complete path that don't exist before we MkdirAll + // so that we can chown all of them properly at the end. 
If chownExisting is false, we won't + // chown the full directory path if it exists + var paths []string + if _, err := os.Stat(path); err != nil && os.IsNotExist(err) { + paths = []string{path} + } else if err == nil && chownExisting { + // short-circuit--we were called with an existing directory and chown was requested + return os.Chown(path, ownerUID, ownerGID) + } else if err == nil { + // nothing to do; directory path fully exists already and chown was NOT requested + return nil + } + + if mkAll { + // walk back to "/" looking for directories which do not exist + // and add them to the paths array for chown after creation + dirPath := path + for { + dirPath = filepath.Dir(dirPath) + if dirPath == "/" { + break + } + if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) { + paths = append(paths, dirPath) + } + } + if err := system.MkdirAll(path, mode, ""); err != nil && !os.IsExist(err) { + return err + } + } else { + if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) { + return err + } + } + // even if it existed, we will chown the requested path + any subpaths that + // didn't exist when we called MkdirAll + for _, pathComponent := range paths { + if err := os.Chown(pathComponent, ownerUID, ownerGID); err != nil { + return err + } + } + return nil +} + +// CanAccess takes a valid (existing) directory and a uid, gid pair and determines +// if that uid, gid pair has access (execute bit) to the directory +func CanAccess(path string, pair IDPair) bool { + statInfo, err := system.Stat(path) + if err != nil { + return false + } + fileMode := os.FileMode(statInfo.Mode()) + permBits := fileMode.Perm() + return accessible(statInfo.UID() == uint32(pair.UID), + statInfo.GID() == uint32(pair.GID), permBits) +} + +func accessible(isOwner, isGroup bool, perms os.FileMode) bool { + if isOwner && (perms&0100 == 0100) { + return true + } + if isGroup && (perms&0010 == 0010) { + return true + } + if perms&0001 == 0001 { + return true + } + return false +} + +// LookupUser uses traditional local system files lookup (from libcontainer/user) on a username, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupUser(username string) (user.User, error) { + // first try a local system files lookup using existing capabilities + usr, err := user.LookupUser(username) + if err == nil { + return usr, nil + } + // local files lookup failed; attempt to call `getent` to query configured passwd dbs + usr, err = getentUser(fmt.Sprintf("%s %s", "passwd", username)) + if err != nil { + return user.User{}, err + } + return usr, nil +} + +// LookupUID uses traditional local system files lookup (from libcontainer/user) on a uid, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupUID(uid int) (user.User, error) { + // first try a local system files lookup using existing capabilities + usr, err := user.LookupUid(uid) + if err == nil { + return usr, nil + } + // local files lookup failed; attempt to call `getent` to query configured passwd dbs + return getentUser(fmt.Sprintf("%s %d", "passwd", uid)) +} + +func getentUser(args string) (user.User, error) { + reader, err := callGetent(args) + if err != nil { + return user.User{}, err + } + users, err := user.ParsePasswd(reader) + if err != nil { + return user.User{}, err + } + if len(users) == 0 { + return user.User{}, fmt.Errorf("getent failed to find passwd entry for %q", strings.Split(args, " ")[1]) + } + return users[0], nil +} + +// 
LookupGroup uses traditional local system files lookup (from libcontainer/user) on a group name, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupGroup(groupname string) (user.Group, error) { + // first try a local system files lookup using existing capabilities + group, err := user.LookupGroup(groupname) + if err == nil { + return group, nil + } + // local files lookup failed; attempt to call `getent` to query configured group dbs + return getentGroup(fmt.Sprintf("%s %s", "group", groupname)) +} + +// LookupGID uses traditional local system files lookup (from libcontainer/user) on a group ID, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupGID(gid int) (user.Group, error) { + // first try a local system files lookup using existing capabilities + group, err := user.LookupGid(gid) + if err == nil { + return group, nil + } + // local files lookup failed; attempt to call `getent` to query configured group dbs + return getentGroup(fmt.Sprintf("%s %d", "group", gid)) +} + +func getentGroup(args string) (user.Group, error) { + reader, err := callGetent(args) + if err != nil { + return user.Group{}, err + } + groups, err := user.ParseGroup(reader) + if err != nil { + return user.Group{}, err + } + if len(groups) == 0 { + return user.Group{}, fmt.Errorf("getent failed to find groups entry for %q", strings.Split(args, " ")[1]) + } + return groups[0], nil +} + +func callGetent(args string) (io.Reader, error) { + entOnce.Do(func() { getentCmd, _ = resolveBinary("getent") }) + // if no `getent` command on host, can't do anything else + if getentCmd == "" { + return nil, fmt.Errorf("") + } + out, err := execCmd(getentCmd, args) + if err != nil { + exitCode, errC := system.GetExitCode(err) + if errC != nil { + return nil, err + } + switch exitCode { + case 1: + return nil, fmt.Errorf("getent reported invalid parameters/database unknown") + case 2: + terms := strings.Split(args, " ") + return nil, fmt.Errorf("getent unable to find entry %q in %s database", terms[1], terms[0]) + case 3: + return nil, fmt.Errorf("getent database doesn't support enumeration") + default: + return nil, err + } + + } + return bytes.NewReader(out), nil +} diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go new file mode 100644 index 000000000000..dbf6bc4c94c2 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go @@ -0,0 +1,25 @@ +// +build windows + +package idtools + +import ( + "os" + + "github.com/containers/storage/pkg/system" +) + +// Platforms such as Windows do not support the UID/GID concept. So make this +// just a wrapper around system.MkdirAll. 
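
To make the remapping arithmetic above concrete, a sketch with a single 65536-wide range starting at host ID 100000 (all values illustrative, import path as vendored):

```go
package main

import (
	"fmt"

	"github.com/containers/storage/pkg/idtools"
)

func main() {
	// One contiguous range: container IDs 0-65535 -> host IDs 100000-165535.
	idmap := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}
	m := idtools.NewIDMappingsFromMaps(idmap, idmap)

	fmt.Println(m.RootPair()) // {100000 100000}: container root at the range start

	host, err := m.ToHost(idtools.IDPair{UID: 33, GID: 33})
	if err != nil {
		panic(err)
	}
	fmt.Println(host) // {100033 100033}: offset 33 into the host range
}
```
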
+func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { + if err := system.MkdirAll(path, mode, ""); err != nil && !os.IsExist(err) { + return err + } + return nil +} + +// CanAccess takes a valid (existing) directory and a uid, gid pair and determines +// if that uid, gid pair has access (execute bit) to the directory +// Windows does not require/support this function, so always return true +func CanAccess(path string, pair IDPair) bool { + return true +} diff --git a/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go new file mode 100644 index 000000000000..9da7975e2c17 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go @@ -0,0 +1,164 @@ +package idtools + +import ( + "fmt" + "regexp" + "sort" + "strconv" + "strings" + "sync" +) + +// add a user and/or group to Linux /etc/passwd, /etc/group using standard +// Linux distribution commands: +// adduser --system --shell /bin/false --disabled-login --disabled-password --no-create-home --group +// useradd -r -s /bin/false + +var ( + once sync.Once + userCommand string + + cmdTemplates = map[string]string{ + "adduser": "--system --shell /bin/false --no-create-home --disabled-login --disabled-password --group %s", + "useradd": "-r -s /bin/false %s", + "usermod": "-%s %d-%d %s", + } + + idOutRegexp = regexp.MustCompile(`uid=([0-9]+).*gid=([0-9]+)`) + // default length for a UID/GID subordinate range + defaultRangeLen = 65536 + defaultRangeStart = 100000 + userMod = "usermod" +) + +// AddNamespaceRangesUser takes a username and uses the standard system +// utility to create a system user/group pair used to hold the +// /etc/sub{uid,gid} ranges which will be used for user namespace +// mapping ranges in containers. 
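
A hedged sketch of the helper described above; it must run as root on a host that provides `useradd`/`adduser` and `usermod`, and the username is purely illustrative.

```go
package main

import (
	"fmt"

	"github.com/containers/storage/pkg/idtools"
)

func main() {
	// Creates a system user/group pair and subordinate ID ranges.
	uid, gid, err := idtools.AddNamespaceRangesUser("dremap")
	if err != nil {
		panic(err)
	}
	// /etc/subuid and /etc/subgid now hold name:start:length lines,
	// e.g. dremap:100000:65536, which parseSubidFile reads back.
	fmt.Println(uid, gid)
}
```
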
+func AddNamespaceRangesUser(name string) (int, int, error) { + if err := addUser(name); err != nil { + return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err) + } + + // Query the system for the created uid and gid pair + out, err := execCmd("id", name) + if err != nil { + return -1, -1, fmt.Errorf("Error trying to find uid/gid for new user %q: %v", name, err) + } + matches := idOutRegexp.FindStringSubmatch(strings.TrimSpace(string(out))) + if len(matches) != 3 { + return -1, -1, fmt.Errorf("Can't find uid, gid from `id` output: %q", string(out)) + } + uid, err := strconv.Atoi(matches[1]) + if err != nil { + return -1, -1, fmt.Errorf("Can't convert found uid (%s) to int: %v", matches[1], err) + } + gid, err := strconv.Atoi(matches[2]) + if err != nil { + return -1, -1, fmt.Errorf("Can't convert found gid (%s) to int: %v", matches[2], err) + } + + // Now we need to create the subuid/subgid ranges for our new user/group (system users + // do not get auto-created ranges in subuid/subgid) + + if err := createSubordinateRanges(name); err != nil { + return -1, -1, fmt.Errorf("Couldn't create subordinate ID ranges: %v", err) + } + return uid, gid, nil +} + +func addUser(userName string) error { + once.Do(func() { + // set up which commands are used for adding users/groups dependent on distro + if _, err := resolveBinary("adduser"); err == nil { + userCommand = "adduser" + } else if _, err := resolveBinary("useradd"); err == nil { + userCommand = "useradd" + } + }) + if userCommand == "" { + return fmt.Errorf("Cannot add user; no useradd/adduser binary found") + } + args := fmt.Sprintf(cmdTemplates[userCommand], userName) + out, err := execCmd(userCommand, args) + if err != nil { + return fmt.Errorf("Failed to add user with error: %v; output: %q", err, string(out)) + } + return nil +} + +func createSubordinateRanges(name string) error { + + // first, we should verify that ranges weren't automatically created + // by the distro tooling + ranges, err := parseSubuid(name) + if err != nil { + return fmt.Errorf("Error while looking for subuid ranges for user %q: %v", name, err) + } + if len(ranges) == 0 { + // no UID ranges; let's create one + startID, err := findNextUIDRange() + if err != nil { + return fmt.Errorf("Can't find available subuid range: %v", err) + } + out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "v", startID, startID+defaultRangeLen-1, name)) + if err != nil { + return fmt.Errorf("Unable to add subuid range to user: %q; output: %s, err: %v", name, out, err) + } + } + + ranges, err = parseSubgid(name) + if err != nil { + return fmt.Errorf("Error while looking for subgid ranges for user %q: %v", name, err) + } + if len(ranges) == 0 { + // no GID ranges; let's create one + startID, err := findNextGIDRange() + if err != nil { + return fmt.Errorf("Can't find available subgid range: %v", err) + } + out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "w", startID, startID+defaultRangeLen-1, name)) + if err != nil { + return fmt.Errorf("Unable to add subgid range to user: %q; output: %s, err: %v", name, out, err) + } + } + return nil +} + +func findNextUIDRange() (int, error) { + ranges, err := parseSubuid("ALL") + if err != nil { + return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subuid file: %v", err) + } + sort.Sort(ranges) + return findNextRangeStart(ranges) +} + +func findNextGIDRange() (int, error) { + ranges, err := parseSubgid("ALL") + if err != nil { + return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subgid file: %v", err) + } + 
sort.Sort(ranges) + return findNextRangeStart(ranges) +} + +func findNextRangeStart(rangeList ranges) (int, error) { + startID := defaultRangeStart + for _, arange := range rangeList { + if wouldOverlap(arange, startID) { + startID = arange.Start + arange.Length + } + } + return startID, nil +} + +func wouldOverlap(arange subIDRange, ID int) bool { + low := ID + high := ID + defaultRangeLen + if (low >= arange.Start && low <= arange.Start+arange.Length) || + (high <= arange.Start+arange.Length && high >= arange.Start) { + return true + } + return false +} diff --git a/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go new file mode 100644 index 000000000000..d98b354cbd82 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go @@ -0,0 +1,12 @@ +// +build !linux + +package idtools + +import "fmt" + +// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair +// and calls the appropriate helper function to add the group and then +// the user to the group in /etc/group and /etc/passwd respectively. +func AddNamespaceRangesUser(name string) (int, int, error) { + return -1, -1, fmt.Errorf("No support for adding users or groups on this OS") +} diff --git a/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go b/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go new file mode 100644 index 000000000000..9703ecbd9d6a --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go @@ -0,0 +1,32 @@ +// +build !windows + +package idtools + +import ( + "fmt" + "os/exec" + "path/filepath" + "strings" +) + +func resolveBinary(binname string) (string, error) { + binaryPath, err := exec.LookPath(binname) + if err != nil { + return "", err + } + resolvedPath, err := filepath.EvalSymlinks(binaryPath) + if err != nil { + return "", err + } + //only return no error if the final resolved binary basename + //matches what was searched for + if filepath.Base(resolvedPath) == binname { + return resolvedPath, nil + } + return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath) +} + +func execCmd(cmd, args string) ([]byte, error) { + execCmd := exec.Command(cmd, strings.Split(args, " ")...) 
+ return execCmd.CombinedOutput() +} diff --git a/vendor/github.com/containers/storage/pkg/mount/flags.go b/vendor/github.com/containers/storage/pkg/mount/flags.go new file mode 100644 index 000000000000..607dbed43a0a --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/flags.go @@ -0,0 +1,149 @@ +package mount + +import ( + "fmt" + "strings" +) + +var flags = map[string]struct { + clear bool + flag int +}{ + "defaults": {false, 0}, + "ro": {false, RDONLY}, + "rw": {true, RDONLY}, + "suid": {true, NOSUID}, + "nosuid": {false, NOSUID}, + "dev": {true, NODEV}, + "nodev": {false, NODEV}, + "exec": {true, NOEXEC}, + "noexec": {false, NOEXEC}, + "sync": {false, SYNCHRONOUS}, + "async": {true, SYNCHRONOUS}, + "dirsync": {false, DIRSYNC}, + "remount": {false, REMOUNT}, + "mand": {false, MANDLOCK}, + "nomand": {true, MANDLOCK}, + "atime": {true, NOATIME}, + "noatime": {false, NOATIME}, + "diratime": {true, NODIRATIME}, + "nodiratime": {false, NODIRATIME}, + "bind": {false, BIND}, + "rbind": {false, RBIND}, + "unbindable": {false, UNBINDABLE}, + "runbindable": {false, RUNBINDABLE}, + "private": {false, PRIVATE}, + "rprivate": {false, RPRIVATE}, + "shared": {false, SHARED}, + "rshared": {false, RSHARED}, + "slave": {false, SLAVE}, + "rslave": {false, RSLAVE}, + "relatime": {false, RELATIME}, + "norelatime": {true, RELATIME}, + "strictatime": {false, STRICTATIME}, + "nostrictatime": {true, STRICTATIME}, +} + +var validFlags = map[string]bool{ + "": true, + "size": true, + "mode": true, + "uid": true, + "gid": true, + "nr_inodes": true, + "nr_blocks": true, + "mpol": true, +} + +var propagationFlags = map[string]bool{ + "bind": true, + "rbind": true, + "unbindable": true, + "runbindable": true, + "private": true, + "rprivate": true, + "shared": true, + "rshared": true, + "slave": true, + "rslave": true, +} + +// MergeTmpfsOptions merges mount options to make sure there are no duplicates. +func MergeTmpfsOptions(options []string) ([]string, error) { + // We use collision maps to remove duplicates. + // For flag, the key is the flag value (the key for propagation flag is -1) + // For data=value, the key is the data + flagCollisions := map[int]bool{} + dataCollisions := map[string]bool{} + + var newOptions []string + // We process in reverse order + for i := len(options) - 1; i >= 0; i-- { + option := options[i] + if option == "defaults" { + continue + } + if f, ok := flags[option]; ok && f.flag != 0 { + // There is only one propagation mode + key := f.flag + if propagationFlags[option] { + key = -1 + } + // Check to see if there is collision for flag + if !flagCollisions[key] { + // We prepend the option and add to collision map + newOptions = append([]string{option}, newOptions...) + flagCollisions[key] = true + } + continue + } + opt := strings.SplitN(option, "=", 2) + if len(opt) != 2 || !validFlags[opt[0]] { + return nil, fmt.Errorf("Invalid tmpfs option %q", opt) + } + if !dataCollisions[opt[0]] { + // We prepend the option and add to collision map + newOptions = append([]string{option}, newOptions...)
+ dataCollisions[opt[0]] = true + } + } + + return newOptions, nil +} + +// Parse fstab type mount options into mount() flags +// and device specific data +func parseOptions(options string) (int, string) { + var ( + flag int + data []string + ) + + for _, o := range strings.Split(options, ",") { + // If the option does not exist in the flags table or the flag + // is not supported on the platform, + // then it is a data value for a specific fs type + if f, exists := flags[o]; exists && f.flag != 0 { + if f.clear { + flag &= ^f.flag + } else { + flag |= f.flag + } + } else { + data = append(data, o) + } + } + return flag, strings.Join(data, ",") +} + +// ParseTmpfsOptions parses fstab type mount options into flags and data +func ParseTmpfsOptions(options string) (int, string, error) { + flags, data := parseOptions(options) + for _, o := range strings.Split(data, ",") { + opt := strings.SplitN(o, "=", 2) + if !validFlags[opt[0]] { + return 0, "", fmt.Errorf("Invalid tmpfs option %q", opt) + } + } + return flags, data, nil +} diff --git a/vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go b/vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go new file mode 100644 index 000000000000..5f76f331b63d --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go @@ -0,0 +1,49 @@ +// +build freebsd,cgo + +package mount + +/* +#include <sys/mount.h> +*/ +import "C" + +const ( + // RDONLY will mount the filesystem as read-only. + RDONLY = C.MNT_RDONLY + + // NOSUID will not allow set-user-identifier or set-group-identifier bits to + // take effect. + NOSUID = C.MNT_NOSUID + + // NOEXEC will not allow execution of any binaries on the mounted file system. + NOEXEC = C.MNT_NOEXEC + + // SYNCHRONOUS will allow any I/O to the file system to be done synchronously. + SYNCHRONOUS = C.MNT_SYNCHRONOUS + + // NOATIME will not update the file access time when reading from a file. + NOATIME = C.MNT_NOATIME +) + +// These flags are unsupported. +const ( + BIND = 0 + DIRSYNC = 0 + MANDLOCK = 0 + NODEV = 0 + NODIRATIME = 0 + UNBINDABLE = 0 + RUNBINDABLE = 0 + PRIVATE = 0 + RPRIVATE = 0 + SHARED = 0 + RSHARED = 0 + SLAVE = 0 + RSLAVE = 0 + RBIND = 0 + RELATIVE = 0 + RELATIME = 0 + REMOUNT = 0 + STRICTATIME = 0 + mntDetach = 0 +) diff --git a/vendor/github.com/containers/storage/pkg/mount/flags_linux.go b/vendor/github.com/containers/storage/pkg/mount/flags_linux.go new file mode 100644 index 000000000000..0425d0dd633c --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/flags_linux.go @@ -0,0 +1,87 @@ +package mount + +import ( + "golang.org/x/sys/unix" +) + +const ( + // RDONLY will mount the file system read-only. + RDONLY = unix.MS_RDONLY + + // NOSUID will not allow set-user-identifier or set-group-identifier bits to + // take effect. + NOSUID = unix.MS_NOSUID + + // NODEV will not interpret character or block special devices on the file + // system. + NODEV = unix.MS_NODEV + + // NOEXEC will not allow execution of any binaries on the mounted file system. + NOEXEC = unix.MS_NOEXEC + + // SYNCHRONOUS will allow I/O to the file system to be done synchronously. + SYNCHRONOUS = unix.MS_SYNCHRONOUS + + // DIRSYNC will force all directory updates within the file system to be done + // synchronously. This affects the following system calls: create, link, + // unlink, symlink, mkdir, rmdir, mknod and rename. + DIRSYNC = unix.MS_DIRSYNC + + // REMOUNT will attempt to remount an already-mounted file system.
This is + // commonly used to change the mount flags for a file system, especially to + // make a readonly file system writeable. It does not change device or mount + // point. + REMOUNT = unix.MS_REMOUNT + + // MANDLOCK will force mandatory locks on a filesystem. + MANDLOCK = unix.MS_MANDLOCK + + // NOATIME will not update the file access time when reading from a file. + NOATIME = unix.MS_NOATIME + + // NODIRATIME will not update the directory access time. + NODIRATIME = unix.MS_NODIRATIME + + // BIND remounts a subtree somewhere else. + BIND = unix.MS_BIND + + // RBIND remounts a subtree and all possible submounts somewhere else. + RBIND = unix.MS_BIND | unix.MS_REC + + // UNBINDABLE creates a mount which cannot be cloned through a bind operation. + UNBINDABLE = unix.MS_UNBINDABLE + + // RUNBINDABLE marks the entire mount tree as UNBINDABLE. + RUNBINDABLE = unix.MS_UNBINDABLE | unix.MS_REC + + // PRIVATE creates a mount which carries no propagation abilities. + PRIVATE = unix.MS_PRIVATE + + // RPRIVATE marks the entire mount tree as PRIVATE. + RPRIVATE = unix.MS_PRIVATE | unix.MS_REC + + // SLAVE creates a mount which receives propagation from its master, but not + // vice versa. + SLAVE = unix.MS_SLAVE + + // RSLAVE marks the entire mount tree as SLAVE. + RSLAVE = unix.MS_SLAVE | unix.MS_REC + + // SHARED creates a mount which provides the ability to create mirrors of + // that mount such that mounts and unmounts within any of the mirrors + // propagate to the other mirrors. + SHARED = unix.MS_SHARED + + // RSHARED marks the entire mount tree as SHARED. + RSHARED = unix.MS_SHARED | unix.MS_REC + + // RELATIME updates inode access times relative to modify or change time. + RELATIME = unix.MS_RELATIME + + // STRICTATIME allows to explicitly request full atime updates. This makes + // it possible for the kernel to default to relatime or noatime but still + // allow userspace to override it. + STRICTATIME = unix.MS_STRICTATIME + + mntDetach = unix.MNT_DETACH +) diff --git a/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go b/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go new file mode 100644 index 000000000000..9ed741e3ff5b --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go @@ -0,0 +1,31 @@ +// +build !linux,!freebsd freebsd,!cgo solaris,!cgo + +package mount + +// These flags are unsupported. +const ( + BIND = 0 + DIRSYNC = 0 + MANDLOCK = 0 + NOATIME = 0 + NODEV = 0 + NODIRATIME = 0 + NOEXEC = 0 + NOSUID = 0 + UNBINDABLE = 0 + RUNBINDABLE = 0 + PRIVATE = 0 + RPRIVATE = 0 + SHARED = 0 + RSHARED = 0 + SLAVE = 0 + RSLAVE = 0 + RBIND = 0 + RELATIME = 0 + RELATIVE = 0 + REMOUNT = 0 + STRICTATIME = 0 + SYNCHRONOUS = 0 + RDONLY = 0 + mntDetach = 0 +) diff --git a/vendor/github.com/containers/storage/pkg/mount/mount.go b/vendor/github.com/containers/storage/pkg/mount/mount.go new file mode 100644 index 000000000000..d3caa16bda2a --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/mount.go @@ -0,0 +1,106 @@ +package mount + +import ( + "sort" + "strings" + "time" + + "github.com/containers/storage/pkg/fileutils" +) + +// GetMounts retrieves a list of mounts for the current running process. +func GetMounts() ([]*Info, error) { + return parseMountTable() +} + +// Mounted determines if a specified mountpoint has been mounted. +// On Linux it looks at /proc/self/mountinfo and on Solaris at mnttab. 
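The mount helpers that follow take fstab-style option strings. As a quick illustration of how such a string decomposes into mount(2) flag bits plus leftover filesystem data, here is a minimal, hypothetical sketch that mirrors the vendored parseOptions logic (Linux-only, with a trimmed flag table; not part of this diff):

```go
package main

import (
	"fmt"
	"strings"

	"golang.org/x/sys/unix"
)

// A trimmed version of the package's flag table, for illustration only.
var flagBits = map[string]struct {
	clear bool
	flag  int
}{
	"ro":      {false, unix.MS_RDONLY},
	"rw":      {true, unix.MS_RDONLY},
	"noatime": {false, unix.MS_NOATIME},
	"bind":    {false, unix.MS_BIND},
}

// parse mirrors parseOptions: known words set or clear flag bits,
// anything else is passed through as fs-specific data.
func parse(options string) (int, string) {
	var flag int
	var data []string
	for _, o := range strings.Split(options, ",") {
		if f, ok := flagBits[o]; ok && f.flag != 0 {
			if f.clear {
				flag &= ^f.flag
			} else {
				flag |= f.flag
			}
		} else {
			data = append(data, o)
		}
	}
	return flag, strings.Join(data, ",")
}

func main() {
	flag, data := parse("ro,noatime,size=64m")
	fmt.Printf("flags=%#x data=%q\n", flag, data) // flags=0x401 data="size=64m"
}
```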
+func Mounted(mountpoint string) (bool, error) { + entries, err := parseMountTable() + if err != nil { + return false, err + } + + mountpoint, err = fileutils.ReadSymlinkedDirectory(mountpoint) + if err != nil { + return false, err + } + // Search the table for the mountpoint + for _, e := range entries { + if e.Mountpoint == mountpoint { + return true, nil + } + } + return false, nil +} + +// Mount will mount filesystem according to the specified configuration, on the +// condition that the target path is *not* already mounted. Options must be +// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See +// flags.go for supported option flags. +func Mount(device, target, mType, options string) error { + flag, _ := parseOptions(options) + if flag&REMOUNT != REMOUNT { + if mounted, err := Mounted(target); err != nil || mounted { + return err + } + } + return ForceMount(device, target, mType, options) +} + +// ForceMount will mount a filesystem according to the specified configuration, +// *regardless* of whether the target path is already mounted. Options must be +// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See +// flags.go for supported option flags. +func ForceMount(device, target, mType, options string) error { + flag, data := parseOptions(options) + return mount(device, target, mType, uintptr(flag), data) +} + +// Unmount lazily unmounts a filesystem on supported platforms, otherwise +// does a normal unmount. +func Unmount(target string) error { + if mounted, err := Mounted(target); err != nil || !mounted { + return err + } + return ForceUnmount(target) +} + +// RecursiveUnmount unmounts the target and all mounts underneath, starting with +// the deepest mount first. +func RecursiveUnmount(target string) error { + mounts, err := GetMounts() + if err != nil { + return err + } + + // Make the deepest mount be first + sort.Sort(sort.Reverse(byMountpoint(mounts))) + + for i, m := range mounts { + if !strings.HasPrefix(m.Mountpoint, target) { + continue + } + if err := Unmount(m.Mountpoint); err != nil && i == len(mounts)-1 { + if mounted, err := Mounted(m.Mountpoint); err != nil || mounted { + return err + } + // Ignore errors for submounts and continue trying to unmount others + // The final unmount should fail if there are any submounts remaining + } + } + return nil +} + +// ForceUnmount will force an unmount of the target filesystem, regardless of +// whether it is mounted or not.
+func ForceUnmount(target string) (err error) { + // Simple retry logic for unmount + for i := 0; i < 10; i++ { + if err = unmount(target, 0); err == nil { + return nil + } + time.Sleep(100 * time.Millisecond) + } + return err +} diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go b/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go new file mode 100644 index 000000000000..814896cc9e6b --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go @@ -0,0 +1,60 @@ +package mount + +/* +#include <errno.h> +#include <stdlib.h> +#include <string.h> +#include <sys/_iovec.h> +#include <sys/mount.h> +#include <sys/param.h> +*/ +import "C" + +import ( + "fmt" + "strings" + "unsafe" + + "golang.org/x/sys/unix" +) + +func allocateIOVecs(options []string) []C.struct_iovec { + out := make([]C.struct_iovec, len(options)) + for i, option := range options { + out[i].iov_base = unsafe.Pointer(C.CString(option)) + out[i].iov_len = C.size_t(len(option) + 1) + } + return out +} + +func mount(device, target, mType string, flag uintptr, data string) error { + isNullFS := false + + xs := strings.Split(data, ",") + for _, x := range xs { + if x == "bind" { + isNullFS = true + } + } + + options := []string{"fspath", target} + if isNullFS { + options = append(options, "fstype", "nullfs", "target", device) + } else { + options = append(options, "fstype", mType, "from", device) + } + rawOptions := allocateIOVecs(options) + for _, rawOption := range rawOptions { + defer C.free(rawOption.iov_base) + } + + if errno := C.nmount(&rawOptions[0], C.uint(len(options)), C.int(flag)); errno != 0 { + reason := C.GoString(C.strerror(*C.__error())) + return fmt.Errorf("Failed to call nmount: %s", reason) + } + return nil +} + +func unmount(target string, flag int) error { + return unix.Unmount(target, flag) +} diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_linux.go b/vendor/github.com/containers/storage/pkg/mount/mounter_linux.go new file mode 100644 index 000000000000..39c36d472a93 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/mounter_linux.go @@ -0,0 +1,57 @@ +package mount + +import ( + "golang.org/x/sys/unix" +) + +const ( + // ptypes is the set of propagation types. + ptypes = unix.MS_SHARED | unix.MS_PRIVATE | unix.MS_SLAVE | unix.MS_UNBINDABLE + + // pflags is the full set of valid flags for a change propagation call. + pflags = ptypes | unix.MS_REC | unix.MS_SILENT + + // broflags is the combination of bind and read only + broflags = unix.MS_BIND | unix.MS_RDONLY +) + +// isremount returns true if either device name or flags identify a remount request, false otherwise. +func isremount(device string, flags uintptr) bool { + switch { + // We treat device "" and "none" as a remount request to provide compatibility with + // requests that don't explicitly set MS_REMOUNT such as those manipulating bind mounts. + case flags&unix.MS_REMOUNT != 0, device == "", device == "none": + return true + default: + return false + } +} + +func mount(device, target, mType string, flags uintptr, data string) error { + oflags := flags &^ ptypes + if !isremount(device, flags) || data != "" { + // Initial call applying all non-propagation flags for mount + // or remount with changed data + if err := unix.Mount(device, target, mType, oflags, data); err != nil { + return err + } + } + + if flags&ptypes != 0 { + // Change the propagation type. + if err := unix.Mount("", target, "", flags&pflags, ""); err != nil { + return err + } + } + + if oflags&broflags == broflags { + // Remount the bind to apply read only.
+ return unix.Mount("", target, "", oflags|unix.MS_REMOUNT, "") + } + + return nil +} + +func unmount(target string, flag int) error { + return unix.Unmount(target, flag) +} diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_solaris.go b/vendor/github.com/containers/storage/pkg/mount/mounter_solaris.go new file mode 100644 index 000000000000..48b86771e7be --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/mounter_solaris.go @@ -0,0 +1,34 @@ +// +build solaris,cgo + +package mount + +import ( + "unsafe" + + "golang.org/x/sys/unix" +) + +// #include +// #include +// #include +// int Mount(const char *spec, const char *dir, int mflag, +// char *fstype, char *dataptr, int datalen, char *optptr, int optlen) { +// return mount(spec, dir, mflag, fstype, dataptr, datalen, optptr, optlen); +// } +import "C" + +func mount(device, target, mType string, flag uintptr, data string) error { + spec := C.CString(device) + dir := C.CString(target) + fstype := C.CString(mType) + _, err := C.Mount(spec, dir, C.int(flag), fstype, nil, 0, nil, 0) + C.free(unsafe.Pointer(spec)) + C.free(unsafe.Pointer(dir)) + C.free(unsafe.Pointer(fstype)) + return err +} + +func unmount(target string, flag int) error { + err := unix.Unmount(target, flag) + return err +} diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go b/vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go new file mode 100644 index 000000000000..a2a3bb457fcb --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go @@ -0,0 +1,11 @@ +// +build !linux,!freebsd,!solaris freebsd,!cgo solaris,!cgo + +package mount + +func mount(device, target, mType string, flag uintptr, data string) error { + panic("Not implemented") +} + +func unmount(target string, flag int) error { + panic("Not implemented") +} diff --git a/vendor/github.com/containers/storage/pkg/mount/mountinfo.go b/vendor/github.com/containers/storage/pkg/mount/mountinfo.go new file mode 100644 index 000000000000..ff4cc1d86b6d --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/mountinfo.go @@ -0,0 +1,54 @@ +package mount + +// Info reveals information about a particular mounted filesystem. This +// struct is populated from the content in the /proc//mountinfo file. +type Info struct { + // ID is a unique identifier of the mount (may be reused after umount). + ID int + + // Parent indicates the ID of the mount parent (or of self for the top of the + // mount tree). + Parent int + + // Major indicates one half of the device ID which identifies the device class. + Major int + + // Minor indicates one half of the device ID which identifies a specific + // instance of device. + Minor int + + // Root of the mount within the filesystem. + Root string + + // Mountpoint indicates the mount point relative to the process's root. + Mountpoint string + + // Opts represents mount-specific options. + Opts string + + // Optional represents optional fields. + Optional string + + // Fstype indicates the type of filesystem, such as EXT3. + Fstype string + + // Source indicates filesystem specific information or "none". + Source string + + // VfsOpts represents per super block options. 
+ VfsOpts string +} + +type byMountpoint []*Info + +func (by byMountpoint) Len() int { + return len(by) +} + +func (by byMountpoint) Less(i, j int) bool { + return by[i].Mountpoint < by[j].Mountpoint +} + +func (by byMountpoint) Swap(i, j int) { + by[i], by[j] = by[j], by[i] +} diff --git a/vendor/github.com/containers/storage/pkg/mount/mountinfo_freebsd.go b/vendor/github.com/containers/storage/pkg/mount/mountinfo_freebsd.go new file mode 100644 index 000000000000..4f32edcd906a --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/mountinfo_freebsd.go @@ -0,0 +1,41 @@ +package mount + +/* +#include <sys/param.h> +#include <sys/ucred.h> +#include <sys/mount.h> +*/ +import "C" + +import ( + "fmt" + "reflect" + "unsafe" +) + +// Parse the mount table via getmntinfo(3); FreeBSD has no /proc/self/mountinfo, +// and comparing Dev and ino does not work from bind mounts. +func parseMountTable() ([]*Info, error) { + var rawEntries *C.struct_statfs + + count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT)) + if count == 0 { + return nil, fmt.Errorf("Failed to call getmntinfo") + } + + var entries []C.struct_statfs + header := (*reflect.SliceHeader)(unsafe.Pointer(&entries)) + header.Cap = count + header.Len = count + header.Data = uintptr(unsafe.Pointer(rawEntries)) + + var out []*Info + for _, entry := range entries { + var mountinfo Info + mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0]) + mountinfo.Source = C.GoString(&entry.f_mntfromname[0]) + mountinfo.Fstype = C.GoString(&entry.f_fstypename[0]) + out = append(out, &mountinfo) + } + return out, nil +} diff --git a/vendor/github.com/containers/storage/pkg/mount/mountinfo_linux.go b/vendor/github.com/containers/storage/pkg/mount/mountinfo_linux.go new file mode 100644 index 000000000000..be69fee1d7bb --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/mountinfo_linux.go @@ -0,0 +1,95 @@ +// +build linux + +package mount + +import ( + "bufio" + "fmt" + "io" + "os" + "strings" +) + +const ( + /* 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue + (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11) + + (1) mount ID: unique identifier of the mount (may be reused after umount) + (2) parent ID: ID of parent (or of self for the top of the mount tree) + (3) major:minor: value of st_dev for files on filesystem + (4) root: root of the mount within the filesystem + (5) mount point: mount point relative to the process's root + (6) mount options: per mount options + (7) optional fields: zero or more fields of the form "tag[:value]" + (8) separator: marks the end of the optional fields + (9) filesystem type: name of filesystem of the form "type[.subtype]" + (10) mount source: filesystem specific information or "none" + (11) super options: per super block options*/ + mountinfoFormat = "%d %d %d:%d %s %s %s %s" +) + +// Parse /proc/self/mountinfo because comparing Dev and ino does not work from +// bind mounts +func parseMountTable() ([]*Info, error) { + f, err := os.Open("/proc/self/mountinfo") + if err != nil { + return nil, err + } + defer f.Close() + + return parseInfoFile(f) +} + +func parseInfoFile(r io.Reader) ([]*Info, error) { + var ( + s = bufio.NewScanner(r) + out = []*Info{} + ) + + for s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + + var ( + p = &Info{} + text = s.Text() + optionalFields string + ) + + if _, err := fmt.Sscanf(text, mountinfoFormat, + &p.ID, &p.Parent, &p.Major, &p.Minor, + &p.Root, &p.Mountpoint, &p.Opts, &optionalFields); err != nil { + return nil, fmt.Errorf("Scanning '%s' failed: %s", text, err) + } + // Safe as mountinfo encodes mountpoints
with spaces as \040. + index := strings.Index(text, " - ") + postSeparatorFields := strings.Fields(text[index+3:]) + if len(postSeparatorFields) < 3 { + return nil, fmt.Errorf("Error found less than 3 fields post '-' in %q", text) + } + + if optionalFields != "-" { + p.Optional = optionalFields + } + + p.Fstype = postSeparatorFields[0] + p.Source = postSeparatorFields[1] + p.VfsOpts = strings.Join(postSeparatorFields[2:], " ") + out = append(out, p) + } + return out, nil +} + +// PidMountInfo collects the mounts for a specific process ID. If the process +// ID is unknown, it is better to use `GetMounts` which will inspect +// "/proc/self/mountinfo" instead. +func PidMountInfo(pid int) ([]*Info, error) { + f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid)) + if err != nil { + return nil, err + } + defer f.Close() + + return parseInfoFile(f) +} diff --git a/vendor/github.com/containers/storage/pkg/mount/mountinfo_solaris.go b/vendor/github.com/containers/storage/pkg/mount/mountinfo_solaris.go new file mode 100644 index 000000000000..ad9ab57f8b8e --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/mountinfo_solaris.go @@ -0,0 +1,37 @@ +// +build solaris,cgo + +package mount + +/* +#include <stdio.h> +#include <sys/mnttab.h> +*/ +import "C" + +import ( + "fmt" +) + +func parseMountTable() ([]*Info, error) { + mnttab := C.fopen(C.CString(C.MNTTAB), C.CString("r")) + if mnttab == nil { + return nil, fmt.Errorf("Failed to open %s", C.MNTTAB) + } + + var out []*Info + var mp C.struct_mnttab + + ret := C.getmntent(mnttab, &mp) + for ret == 0 { + var mountinfo Info + mountinfo.Mountpoint = C.GoString(mp.mnt_mountp) + mountinfo.Source = C.GoString(mp.mnt_special) + mountinfo.Fstype = C.GoString(mp.mnt_fstype) + mountinfo.Opts = C.GoString(mp.mnt_mntopts) + out = append(out, &mountinfo) + ret = C.getmntent(mnttab, &mp) + } + + C.fclose(mnttab) + return out, nil +} diff --git a/vendor/github.com/containers/storage/pkg/mount/mountinfo_unsupported.go b/vendor/github.com/containers/storage/pkg/mount/mountinfo_unsupported.go new file mode 100644 index 000000000000..7fbcf19214b9 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/mountinfo_unsupported.go @@ -0,0 +1,12 @@ +// +build !windows,!linux,!freebsd,!solaris freebsd,!cgo solaris,!cgo + +package mount + +import ( + "fmt" + "runtime" +) + +func parseMountTable() ([]*Info, error) { + return nil, fmt.Errorf("mount.parseMountTable is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} diff --git a/vendor/github.com/containers/storage/pkg/mount/mountinfo_windows.go b/vendor/github.com/containers/storage/pkg/mount/mountinfo_windows.go new file mode 100644 index 000000000000..dab8a37ed01d --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/mountinfo_windows.go @@ -0,0 +1,6 @@ +package mount + +func parseMountTable() ([]*Info, error) { + // Do NOT return an error! + return nil, nil +} diff --git a/vendor/github.com/containers/storage/pkg/mount/sharedsubtree_linux.go b/vendor/github.com/containers/storage/pkg/mount/sharedsubtree_linux.go new file mode 100644 index 000000000000..8ceec84bc6c8 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/sharedsubtree_linux.go @@ -0,0 +1,69 @@ +// +build linux + +package mount + +// MakeShared ensures a mounted filesystem has the SHARED mount option enabled. +// See the supported options in flags.go for further reference.
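To see what parseInfoFile pulls out of a record, here is a standalone sketch that applies the same Sscanf format string and " - " separator split to the synthetic sample line from the format comment above:

```go
package main

import (
	"fmt"
	"strings"
)

const mountinfoFormat = "%d %d %d:%d %s %s %s %s"

func main() {
	text := "36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue"

	var id, parent, major, minor int
	var root, mountpoint, opts, optional string
	if _, err := fmt.Sscanf(text, mountinfoFormat,
		&id, &parent, &major, &minor, &root, &mountpoint, &opts, &optional); err != nil {
		panic(err)
	}

	// Everything after " - " is fstype, source, and super-block options.
	post := strings.Fields(text[strings.Index(text, " - ")+3:])
	fmt.Println(mountpoint, opts, post[0], post[1]) // /mnt2 rw,noatime ext3 /dev/root
}
```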
+func MakeShared(mountPoint string) error { + return ensureMountedAs(mountPoint, "shared") +} + +// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled. +// See the supported options in flags.go for further reference. +func MakeRShared(mountPoint string) error { + return ensureMountedAs(mountPoint, "rshared") +} + +// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled. +// See the supported options in flags.go for further reference. +func MakePrivate(mountPoint string) error { + return ensureMountedAs(mountPoint, "private") +} + +// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option +// enabled. See the supported options in flags.go for further reference. +func MakeRPrivate(mountPoint string) error { + return ensureMountedAs(mountPoint, "rprivate") +} + +// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled. +// See the supported options in flags.go for further reference. +func MakeSlave(mountPoint string) error { + return ensureMountedAs(mountPoint, "slave") +} + +// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled. +// See the supported options in flags.go for further reference. +func MakeRSlave(mountPoint string) error { + return ensureMountedAs(mountPoint, "rslave") +} + +// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option +// enabled. See the supported options in flags.go for further reference. +func MakeUnbindable(mountPoint string) error { + return ensureMountedAs(mountPoint, "unbindable") +} + +// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount +// option enabled. See the supported options in flags.go for further reference. +func MakeRUnbindable(mountPoint string) error { + return ensureMountedAs(mountPoint, "runbindable") +} + +func ensureMountedAs(mountPoint, options string) error { + mounted, err := Mounted(mountPoint) + if err != nil { + return err + } + + if !mounted { + if err := Mount(mountPoint, mountPoint, "none", "bind,rw"); err != nil { + return err + } + } + if _, err = Mounted(mountPoint); err != nil { + return err + } + + return ForceMount("", mountPoint, "none", options) +} diff --git a/vendor/github.com/containers/storage/pkg/mount/sharedsubtree_solaris.go b/vendor/github.com/containers/storage/pkg/mount/sharedsubtree_solaris.go new file mode 100644 index 000000000000..09f6b03cbc0c --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/sharedsubtree_solaris.go @@ -0,0 +1,58 @@ +// +build solaris + +package mount + +// MakeShared ensures a mounted filesystem has the SHARED mount option enabled. +// See the supported options in flags.go for further reference. +func MakeShared(mountPoint string) error { + return ensureMountedAs(mountPoint, "shared") +} + +// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled. +// See the supported options in flags.go for further reference. +func MakeRShared(mountPoint string) error { + return ensureMountedAs(mountPoint, "rshared") +} + +// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled. +// See the supported options in flags.go for further reference. +func MakePrivate(mountPoint string) error { + return ensureMountedAs(mountPoint, "private") +} + +// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option +// enabled. See the supported options in flags.go for further reference. 
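Usage of the propagation helpers above might look like the following sketch (the storage-root path is hypothetical); on Linux, ensureMountedAs first bind-mounts the path onto itself if needed, then applies the propagation option via a remount:

```go
package main

import (
	"log"

	"github.com/containers/storage/pkg/mount"
)

func main() {
	root := "/var/lib/example-store" // hypothetical storage root
	// Keep mounts made under root from propagating back out.
	if err := mount.MakeRPrivate(root); err != nil {
		log.Fatal(err)
	}
	log.Printf("%s is now rprivate", root)
}
```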
+func MakeRPrivate(mountPoint string) error { + return ensureMountedAs(mountPoint, "rprivate") +} + +// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled. +// See the supported options in flags.go for further reference. +func MakeSlave(mountPoint string) error { + return ensureMountedAs(mountPoint, "slave") +} + +// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled. +// See the supported options in flags.go for further reference. +func MakeRSlave(mountPoint string) error { + return ensureMountedAs(mountPoint, "rslave") +} + +// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option +// enabled. See the supported options in flags.go for further reference. +func MakeUnbindable(mountPoint string) error { + return ensureMountedAs(mountPoint, "unbindable") +} + +// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount +// option enabled. See the supported options in flags.go for further reference. +func MakeRUnbindable(mountPoint string) error { + return ensureMountedAs(mountPoint, "runbindable") +} + +func ensureMountedAs(mountPoint, options string) error { + // TODO: Solaris does not support bind mounts. + // Evaluate lofs and also look at the relevant + // mount flags to be supported. + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/chtimes.go b/vendor/github.com/containers/storage/pkg/system/chtimes.go new file mode 100644 index 000000000000..056d19954d63 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/chtimes.go @@ -0,0 +1,35 @@ +package system + +import ( + "os" + "time" +) + +// Chtimes changes the access time and modified time of a file at the given path +func Chtimes(name string, atime time.Time, mtime time.Time) error { + unixMinTime := time.Unix(0, 0) + unixMaxTime := maxTime + + // If the modified time is prior to the Unix Epoch, or after the + // end of Unix Time, os.Chtimes has undefined behavior; + // default to Unix Epoch in this case, just in case + + if atime.Before(unixMinTime) || atime.After(unixMaxTime) { + atime = unixMinTime + } + + if mtime.Before(unixMinTime) || mtime.After(unixMaxTime) { + mtime = unixMinTime + } + + if err := os.Chtimes(name, atime, mtime); err != nil { + return err + } + + // Take platform specific action for setting create time. + if err := setCTime(name, mtime); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/chtimes_unix.go b/vendor/github.com/containers/storage/pkg/system/chtimes_unix.go new file mode 100644 index 000000000000..09d58bcbfdd4 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/chtimes_unix.go @@ -0,0 +1,14 @@ +// +build !windows + +package system + +import ( + "time" +) + +//setCTime will set the create time on a file. On Unix, the create +//time is updated as a side effect of setting the modified time, so +//no action is required. +func setCTime(path string, ctime time.Time) error { + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/chtimes_windows.go b/vendor/github.com/containers/storage/pkg/system/chtimes_windows.go new file mode 100644 index 000000000000..45428c141ca9 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/chtimes_windows.go @@ -0,0 +1,28 @@ +// +build windows + +package system + +import ( + "time" + + "golang.org/x/sys/windows" +) + +//setCTime will set the create time on a file.
On Windows, this requires +//calling SetFileTime and explicitly including the create time. +func setCTime(path string, ctime time.Time) error { + ctimespec := windows.NsecToTimespec(ctime.UnixNano()) + pathp, e := windows.UTF16PtrFromString(path) + if e != nil { + return e + } + h, e := windows.CreateFile(pathp, + windows.FILE_WRITE_ATTRIBUTES, windows.FILE_SHARE_WRITE, nil, + windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS, 0) + if e != nil { + return e + } + defer windows.Close(h) + c := windows.NsecToFiletime(windows.TimespecToNsec(ctimespec)) + return windows.SetFileTime(h, &c, nil, nil) +} diff --git a/vendor/github.com/containers/storage/pkg/system/errors.go b/vendor/github.com/containers/storage/pkg/system/errors.go new file mode 100644 index 000000000000..288318985e3e --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/errors.go @@ -0,0 +1,10 @@ +package system + +import ( + "errors" +) + +var ( + // ErrNotSupportedPlatform means the platform is not supported. + ErrNotSupportedPlatform = errors.New("platform and architecture is not supported") +) diff --git a/vendor/github.com/containers/storage/pkg/system/exitcode.go b/vendor/github.com/containers/storage/pkg/system/exitcode.go new file mode 100644 index 000000000000..60f0514b1ddd --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/exitcode.go @@ -0,0 +1,33 @@ +package system + +import ( + "fmt" + "os/exec" + "syscall" +) + +// GetExitCode returns the ExitStatus of the specified error if its type is +// exec.ExitError; otherwise it returns 0 and an error. +func GetExitCode(err error) (int, error) { + exitCode := 0 + if exiterr, ok := err.(*exec.ExitError); ok { + if procExit, ok := exiterr.Sys().(syscall.WaitStatus); ok { + return procExit.ExitStatus(), nil + } + } + return exitCode, fmt.Errorf("failed to get exit code") +} + +// ProcessExitCode processes the specified error and returns the exit status code +// if the error was of type exec.ExitError; otherwise it returns 0. +func ProcessExitCode(err error) (exitCode int) { + if err != nil { + var exiterr error + if exitCode, exiterr = GetExitCode(err); exiterr != nil { + // TODO: Fix this so we check the error's text. + // we've failed to retrieve exit code, so we set it to 127 + exitCode = 127 + } + } + return +} diff --git a/vendor/github.com/containers/storage/pkg/system/filesys.go b/vendor/github.com/containers/storage/pkg/system/filesys.go new file mode 100644 index 000000000000..102565f7601a --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/filesys.go @@ -0,0 +1,67 @@ +// +build !windows + +package system + +import ( + "io/ioutil" + "os" + "path/filepath" +) + +// MkdirAllWithACL is a wrapper for MkdirAll on unix systems. +func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error { + return MkdirAll(path, perm, sddl) +} + +// MkdirAll creates a directory named path along with any necessary parents, +// with permission specified by attribute perm for all directories created. +func MkdirAll(path string, perm os.FileMode, sddl string) error { + return os.MkdirAll(path, perm) +} + +// IsAbs is a platform-specific wrapper for filepath.IsAbs. +func IsAbs(path string) bool { + return filepath.IsAbs(path) +} + +// The functions below here are wrappers for the equivalents in the os and ioutils packages. +// They are passthrough on Unix platforms, and only relevant on Windows. + +// CreateSequential creates the named file with mode 0666 (before umask), truncating +// it if it already exists.
If successful, methods on the returned +// File can be used for I/O; the associated file descriptor has mode +// O_RDWR. +// If there is an error, it will be of type *PathError. +func CreateSequential(name string) (*os.File, error) { + return os.Create(name) +} + +// OpenSequential opens the named file for reading. If successful, methods on +// the returned file can be used for reading; the associated file +// descriptor has mode O_RDONLY. +// If there is an error, it will be of type *PathError. +func OpenSequential(name string) (*os.File, error) { + return os.Open(name) +} + +// OpenFileSequential is the generalized open call; most users will use Open +// or Create instead. It opens the named file with specified flag +// (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful, +// methods on the returned File can be used for I/O. +// If there is an error, it will be of type *PathError. +func OpenFileSequential(name string, flag int, perm os.FileMode) (*os.File, error) { + return os.OpenFile(name, flag, perm) +} + +// TempFileSequential creates a new temporary file in the directory dir +// with a name beginning with prefix, opens the file for reading +// and writing, and returns the resulting *os.File. +// If dir is the empty string, TempFile uses the default directory +// for temporary files (see os.TempDir). +// Multiple programs calling TempFile simultaneously +// will not choose the same file. The caller can use f.Name() +// to find the pathname of the file. It is the caller's responsibility +// to remove the file when no longer needed. +func TempFileSequential(dir, prefix string) (f *os.File, err error) { + return ioutil.TempFile(dir, prefix) +} diff --git a/vendor/github.com/containers/storage/pkg/system/filesys_windows.go b/vendor/github.com/containers/storage/pkg/system/filesys_windows.go new file mode 100644 index 000000000000..a61b53d0ba35 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/filesys_windows.go @@ -0,0 +1,298 @@ +// +build windows + +package system + +import ( + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + "sync" + "syscall" + "time" + "unsafe" + + winio "github.com/Microsoft/go-winio" + "golang.org/x/sys/windows" +) + +const ( + // SddlAdministratorsLocalSystem is local administrators plus NT AUTHORITY\System + SddlAdministratorsLocalSystem = "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)" + // SddlNtvmAdministratorsLocalSystem is NT VIRTUAL MACHINE\Virtual Machines plus local administrators plus NT AUTHORITY\System + SddlNtvmAdministratorsLocalSystem = "D:P(A;OICI;GA;;;S-1-5-83-0)(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)" +) + +// MkdirAllWithACL is a wrapper for MkdirAll that creates a directory +// with an appropriate SDDL defined ACL. +func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error { + return mkdirall(path, true, sddl) +} + +// MkdirAll implementation that is volume path aware for Windows. +func MkdirAll(path string, _ os.FileMode, sddl string) error { + return mkdirall(path, false, sddl) +} + +// mkdirall is a custom version of os.MkdirAll modified for use on Windows +// so that it is both volume path aware, and can create a directory with +// a DACL. +func mkdirall(path string, applyACL bool, sddl string) error { + if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) { + return nil + } + + // The rest of this method is largely copied from os.MkdirAll and should be kept + // as-is to ensure compatibility. 
+ + // Fast path: if we can tell whether path is a directory or file, stop with success or error. + dir, err := os.Stat(path) + if err == nil { + if dir.IsDir() { + return nil + } + return &os.PathError{ + Op: "mkdir", + Path: path, + Err: syscall.ENOTDIR, + } + } + + // Slow path: make sure parent exists and then call Mkdir for path. + i := len(path) + for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator. + i-- + } + + j := i + for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element. + j-- + } + + if j > 1 { + // Create parent + err = mkdirall(path[0:j-1], false, sddl) + if err != nil { + return err + } + } + + // Parent now exists; invoke os.Mkdir or mkdirWithACL and use its result. + if applyACL { + err = mkdirWithACL(path, sddl) + } else { + err = os.Mkdir(path, 0) + } + + if err != nil { + // Handle arguments like "foo/." by + // double-checking that directory doesn't exist. + dir, err1 := os.Lstat(path) + if err1 == nil && dir.IsDir() { + return nil + } + return err + } + return nil +} + +// mkdirWithACL creates a new directory. If there is an error, it will be of +// type *PathError. +// +// This is a modified and combined version of os.Mkdir and windows.Mkdir +// in golang to cater for creating a directory with an ACL permitting full +// access, with inheritance, to any subfolder/file for Built-in Administrators +// and Local System. +func mkdirWithACL(name string, sddl string) error { + sa := windows.SecurityAttributes{Length: 0} + sd, err := winio.SddlToSecurityDescriptor(sddl) + if err != nil { + return &os.PathError{Op: "mkdir", Path: name, Err: err} + } + sa.Length = uint32(unsafe.Sizeof(sa)) + sa.InheritHandle = 1 + sa.SecurityDescriptor = uintptr(unsafe.Pointer(&sd[0])) + + namep, err := windows.UTF16PtrFromString(name) + if err != nil { + return &os.PathError{Op: "mkdir", Path: name, Err: err} + } + + e := windows.CreateDirectory(namep, &sa) + if e != nil { + return &os.PathError{Op: "mkdir", Path: name, Err: e} + } + return nil +} + +// IsAbs is a platform-specific wrapper for filepath.IsAbs. On Windows, +// golang filepath.IsAbs does not consider a path \windows\system32 as absolute +// as it doesn't start with a drive-letter/colon combination. However, in +// docker we need to verify things such as WORKDIR /windows/system32 in +// a Dockerfile (which gets translated to \windows\system32 when being processed +// by the daemon). This SHOULD be treated as absolute from a docker processing +// perspective. +func IsAbs(path string) bool { + if !filepath.IsAbs(path) { + if !strings.HasPrefix(path, string(os.PathSeparator)) { + return false + } + } + return true +} + +// The origin of the functions below here is the golang OS and windows packages, +// slightly modified to only cope with files, not directories due to the +// specific use case. +// +// The alteration is to allow a file on Windows to be opened with +// FILE_FLAG_SEQUENTIAL_SCAN (in particular for docker load), to avoid eating +// the standby list, particularly when accessing large files such as layer.tar. + +// CreateSequential creates the named file with mode 0666 (before umask), truncating +// it if it already exists. If successful, methods on the returned +// File can be used for I/O; the associated file descriptor has mode +// O_RDWR. +// If there is an error, it will be of type *PathError. +func CreateSequential(name string) (*os.File, error) { + return OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0) +} + +// OpenSequential opens the named file for reading.
If successful, methods on +// the returned file can be used for reading; the associated file +// descriptor has mode O_RDONLY. +// If there is an error, it will be of type *PathError. +func OpenSequential(name string) (*os.File, error) { + return OpenFileSequential(name, os.O_RDONLY, 0) +} + +// OpenFileSequential is the generalized open call; most users will use Open +// or Create instead. +// If there is an error, it will be of type *PathError. +func OpenFileSequential(name string, flag int, _ os.FileMode) (*os.File, error) { + if name == "" { + return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOENT} + } + r, errf := windowsOpenFileSequential(name, flag, 0) + if errf == nil { + return r, nil + } + return nil, &os.PathError{Op: "open", Path: name, Err: errf} +} + +func windowsOpenFileSequential(name string, flag int, _ os.FileMode) (file *os.File, err error) { + r, e := windowsOpenSequential(name, flag|windows.O_CLOEXEC, 0) + if e != nil { + return nil, e + } + return os.NewFile(uintptr(r), name), nil +} + +func makeInheritSa() *windows.SecurityAttributes { + var sa windows.SecurityAttributes + sa.Length = uint32(unsafe.Sizeof(sa)) + sa.InheritHandle = 1 + return &sa +} + +func windowsOpenSequential(path string, mode int, _ uint32) (fd windows.Handle, err error) { + if len(path) == 0 { + return windows.InvalidHandle, windows.ERROR_FILE_NOT_FOUND + } + pathp, err := windows.UTF16PtrFromString(path) + if err != nil { + return windows.InvalidHandle, err + } + var access uint32 + switch mode & (windows.O_RDONLY | windows.O_WRONLY | windows.O_RDWR) { + case windows.O_RDONLY: + access = windows.GENERIC_READ + case windows.O_WRONLY: + access = windows.GENERIC_WRITE + case windows.O_RDWR: + access = windows.GENERIC_READ | windows.GENERIC_WRITE + } + if mode&windows.O_CREAT != 0 { + access |= windows.GENERIC_WRITE + } + if mode&windows.O_APPEND != 0 { + access &^= windows.GENERIC_WRITE + access |= windows.FILE_APPEND_DATA + } + sharemode := uint32(windows.FILE_SHARE_READ | windows.FILE_SHARE_WRITE) + var sa *windows.SecurityAttributes + if mode&windows.O_CLOEXEC == 0 { + sa = makeInheritSa() + } + var createmode uint32 + switch { + case mode&(windows.O_CREAT|windows.O_EXCL) == (windows.O_CREAT | windows.O_EXCL): + createmode = windows.CREATE_NEW + case mode&(windows.O_CREAT|windows.O_TRUNC) == (windows.O_CREAT | windows.O_TRUNC): + createmode = windows.CREATE_ALWAYS + case mode&windows.O_CREAT == windows.O_CREAT: + createmode = windows.OPEN_ALWAYS + case mode&windows.O_TRUNC == windows.O_TRUNC: + createmode = windows.TRUNCATE_EXISTING + default: + createmode = windows.OPEN_EXISTING + } + // Use FILE_FLAG_SEQUENTIAL_SCAN rather than FILE_ATTRIBUTE_NORMAL as implemented in golang. + //https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx + const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN + h, e := windows.CreateFile(pathp, access, sharemode, sa, createmode, fileFlagSequentialScan, 0) + return h, e +} + +// Helpers for TempFileSequential +var rand uint32 +var randmu sync.Mutex + +func reseed() uint32 { + return uint32(time.Now().UnixNano() + int64(os.Getpid())) +} +func nextSuffix() string { + randmu.Lock() + r := rand + if r == 0 { + r = reseed() + } + r = r*1664525 + 1013904223 // constants from Numerical Recipes + rand = r + randmu.Unlock() + return strconv.Itoa(int(1e9 + r%1e9))[1:] +} + +// TempFileSequential is a copy of ioutil.TempFile, modified to use sequential +// file access. 
Below is the original comment from golang: +// TempFile creates a new temporary file in the directory dir +// with a name beginning with prefix, opens the file for reading +// and writing, and returns the resulting *os.File. +// If dir is the empty string, TempFile uses the default directory +// for temporary files (see os.TempDir). +// Multiple programs calling TempFile simultaneously +// will not choose the same file. The caller can use f.Name() +// to find the pathname of the file. It is the caller's responsibility +// to remove the file when no longer needed. +func TempFileSequential(dir, prefix string) (f *os.File, err error) { + if dir == "" { + dir = os.TempDir() + } + + nconflict := 0 + for i := 0; i < 10000; i++ { + name := filepath.Join(dir, prefix+nextSuffix()) + f, err = OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) + if os.IsExist(err) { + if nconflict++; nconflict > 10 { + randmu.Lock() + rand = reseed() + randmu.Unlock() + } + continue + } + break + } + return +} diff --git a/vendor/github.com/containers/storage/pkg/system/init.go b/vendor/github.com/containers/storage/pkg/system/init.go new file mode 100644 index 000000000000..17935088dedf --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/init.go @@ -0,0 +1,22 @@ +package system + +import ( + "syscall" + "time" + "unsafe" +) + +// Used by chtimes +var maxTime time.Time + +func init() { + // chtimes initialization + if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 { + // This is a 64 bit timespec + // os.Chtimes limits time to the following + maxTime = time.Unix(0, 1<<63-1) + } else { + // This is a 32 bit timespec + maxTime = time.Unix(1<<31-1, 0) + } +} diff --git a/vendor/github.com/containers/storage/pkg/system/init_windows.go b/vendor/github.com/containers/storage/pkg/system/init_windows.go new file mode 100644 index 000000000000..019c66441ce9 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/init_windows.go @@ -0,0 +1,17 @@ +package system + +import "os" + +// LCOWSupported determines if Linux Containers on Windows are supported. +// Note: This feature is in development (06/17) and enabled through an +// environment variable. At a future time, it will be enabled based +// on build number. @jhowardmsft +var lcowSupported = false + +func init() { + // LCOW initialization + if os.Getenv("LCOW_SUPPORTED") != "" { + lcowSupported = true + } + +} diff --git a/vendor/github.com/containers/storage/pkg/system/lcow_unix.go b/vendor/github.com/containers/storage/pkg/system/lcow_unix.go new file mode 100644 index 000000000000..cff33bb40856 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/lcow_unix.go @@ -0,0 +1,8 @@ +// +build !windows + +package system + +// LCOWSupported returns true if Linux containers on Windows are supported. +func LCOWSupported() bool { + return false +} diff --git a/vendor/github.com/containers/storage/pkg/system/lcow_windows.go b/vendor/github.com/containers/storage/pkg/system/lcow_windows.go new file mode 100644 index 000000000000..e54d01e696b5 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/lcow_windows.go @@ -0,0 +1,6 @@ +package system + +// LCOWSupported returns true if Linux containers on Windows are supported. 
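The init logic above sizes maxTime by the width of syscall.Timespec's Nsec field; as an illustrative standalone check, these are the two bounds those expressions correspond to:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// 64-bit timespec: maximum nanoseconds since the epoch.
	fmt.Println(time.Unix(0, 1<<63-1).UTC()) // 2262-04-11 23:47:16.854775807 +0000 UTC
	// 32-bit timespec: maximum seconds since the epoch (the "year 2038" limit).
	fmt.Println(time.Unix(1<<31-1, 0).UTC()) // 2038-01-19 03:14:07 +0000 UTC
}
```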
+func LCOWSupported() bool { + return lcowSupported +} diff --git a/vendor/github.com/containers/storage/pkg/system/lstat_unix.go b/vendor/github.com/containers/storage/pkg/system/lstat_unix.go new file mode 100644 index 000000000000..bd23c4d50b21 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/lstat_unix.go @@ -0,0 +1,19 @@ +// +build !windows + +package system + +import ( + "syscall" +) + +// Lstat takes a path to a file and returns +// a system.StatT type pertaining to that file. +// +// Throws an error if the file does not exist +func Lstat(path string) (*StatT, error) { + s := &syscall.Stat_t{} + if err := syscall.Lstat(path, s); err != nil { + return nil, err + } + return fromStatT(s) +} diff --git a/vendor/github.com/containers/storage/pkg/system/lstat_windows.go b/vendor/github.com/containers/storage/pkg/system/lstat_windows.go new file mode 100644 index 000000000000..e51df0dafeb1 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/lstat_windows.go @@ -0,0 +1,14 @@ +package system + +import "os" + +// Lstat calls os.Lstat to get a fileinfo interface back. +// This is then copied into our own locally defined structure. +func Lstat(path string) (*StatT, error) { + fi, err := os.Lstat(path) + if err != nil { + return nil, err + } + + return fromStatT(&fi) +} diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo.go b/vendor/github.com/containers/storage/pkg/system/meminfo.go new file mode 100644 index 000000000000..3b6e947e6753 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/meminfo.go @@ -0,0 +1,17 @@ +package system + +// MemInfo contains memory statistics of the host system. +type MemInfo struct { + // Total usable RAM (i.e. physical RAM minus a few reserved bits and the + // kernel binary code). + MemTotal int64 + + // Amount of free memory. + MemFree int64 + + // Total amount of swap space available. + SwapTotal int64 + + // Amount of swap space that is currently unused. + SwapFree int64 +} diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_linux.go b/vendor/github.com/containers/storage/pkg/system/meminfo_linux.go new file mode 100644 index 000000000000..385f1d5e735a --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/meminfo_linux.go @@ -0,0 +1,65 @@ +package system + +import ( + "bufio" + "io" + "os" + "strconv" + "strings" + + "github.com/docker/go-units" +) + +// ReadMemInfo retrieves memory statistics of the host system and returns a +// MemInfo type. +func ReadMemInfo() (*MemInfo, error) { + file, err := os.Open("/proc/meminfo") + if err != nil { + return nil, err + } + defer file.Close() + return parseMemInfo(file) +} + +// parseMemInfo parses the /proc/meminfo file into +// a MemInfo object given an io.Reader to the file. +// Throws error if there are problems reading from the file +func parseMemInfo(reader io.Reader) (*MemInfo, error) { + meminfo := &MemInfo{} + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + // Expected format: ["MemTotal:", "1234", "kB"] + parts := strings.Fields(scanner.Text()) + + // Sanity checks: Skip malformed entries. + if len(parts) < 3 || parts[2] != "kB" { + continue + } + + // Convert to bytes. 
+ size, err := strconv.Atoi(parts[1]) + if err != nil { + continue + } + bytes := int64(size) * units.KiB + + switch parts[0] { + case "MemTotal:": + meminfo.MemTotal = bytes + case "MemFree:": + meminfo.MemFree = bytes + case "SwapTotal:": + meminfo.SwapTotal = bytes + case "SwapFree:": + meminfo.SwapFree = bytes + } + + } + + // Handle errors that may have occurred during the reading of the file. + if err := scanner.Err(); err != nil { + return nil, err + } + + return meminfo, nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go b/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go new file mode 100644 index 000000000000..925776e789bf --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go @@ -0,0 +1,129 @@ +// +build solaris,cgo + +package system + +import ( + "fmt" + "unsafe" +) + +// #cgo CFLAGS: -std=c99 +// #cgo LDFLAGS: -lkstat +// #include <unistd.h> +// #include <stdlib.h> +// #include <stdio.h> +// #include <kstat.h> +// #include <sys/swap.h> +// #include <sys/param.h> +// struct swaptable *allocSwaptable(int num) { +// struct swaptable *st; +// struct swapent *swapent; +// st = (struct swaptable *)malloc(num * sizeof(swapent_t) + sizeof (int)); +// swapent = st->swt_ent; +// for (int i = 0; i < num; i++,swapent++) { +// swapent->ste_path = (char *)malloc(MAXPATHLEN * sizeof (char)); +// } +// st->swt_n = num; +// return st; +//} +// void freeSwaptable (struct swaptable *st) { +// struct swapent *swapent = st->swt_ent; +// for (int i = 0; i < st->swt_n; i++,swapent++) { +// free(swapent->ste_path); +// } +// free(st); +// } +// swapent_t getSwapEnt(swapent_t *ent, int i) { +// return ent[i]; +// } +// int64_t getPpKernel() { +// int64_t pp_kernel = 0; +// kstat_ctl_t *ksc; +// kstat_t *ks; +// kstat_named_t *knp; +// kid_t kid; +// +// if ((ksc = kstat_open()) == NULL) { +// return -1; +// } +// if ((ks = kstat_lookup(ksc, "unix", 0, "system_pages")) == NULL) { +// return -1; +// } +// if (((kid = kstat_read(ksc, ks, NULL)) == -1) || +// ((knp = kstat_data_lookup(ks, "pp_kernel")) == NULL)) { +// return -1; +// } +// switch (knp->data_type) { +// case KSTAT_DATA_UINT64: +// pp_kernel = knp->value.ui64; +// break; +// case KSTAT_DATA_UINT32: +// pp_kernel = knp->value.ui32; +// break; +// } +// pp_kernel *= sysconf(_SC_PAGESIZE); +// return (pp_kernel > 0 ? pp_kernel : -1); +// } +import "C" + +// Get the system memory info using sysconf same as prtconf +func getTotalMem() int64 { + pagesize := C.sysconf(C._SC_PAGESIZE) + npages := C.sysconf(C._SC_PHYS_PAGES) + return int64(pagesize * npages) +} + +func getFreeMem() int64 { + pagesize := C.sysconf(C._SC_PAGESIZE) + npages := C.sysconf(C._SC_AVPHYS_PAGES) + return int64(pagesize * npages) +} + +// ReadMemInfo retrieves memory statistics of the host system and returns a +// MemInfo type.
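A standalone sketch of the /proc/meminfo parsing loop above, run against a canned snippet with synthetic values; fields follow the "Key: value kB" layout and malformed lines are skipped:

```go
package main

import (
	"bufio"
	"fmt"
	"strconv"
	"strings"
)

func main() {
	sample := "MemTotal:  2048000 kB\nMemFree:  512000 kB\nBogusLine\n"
	scanner := bufio.NewScanner(strings.NewReader(sample))
	for scanner.Scan() {
		parts := strings.Fields(scanner.Text())
		if len(parts) < 3 || parts[2] != "kB" {
			continue // skip malformed entries, as parseMemInfo does
		}
		kb, err := strconv.Atoi(parts[1])
		if err != nil {
			continue
		}
		fmt.Printf("%s %d bytes\n", parts[0], int64(kb)*1024)
	}
}
```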
+func ReadMemInfo() (*MemInfo, error) { + + ppKernel := C.getPpKernel() + MemTotal := getTotalMem() + MemFree := getFreeMem() + SwapTotal, SwapFree, err := getSysSwap() + + if ppKernel < 0 || MemTotal < 0 || MemFree < 0 || SwapTotal < 0 || + SwapFree < 0 { + return nil, fmt.Errorf("error getting system memory info %v\n", err) + } + + meminfo := &MemInfo{} + // Total memory is total physical memory less the memory locked by the kernel + meminfo.MemTotal = MemTotal - int64(ppKernel) + meminfo.MemFree = MemFree + meminfo.SwapTotal = SwapTotal + meminfo.SwapFree = SwapFree + + return meminfo, nil +} + +func getSysSwap() (int64, int64, error) { + var tSwap int64 + var fSwap int64 + var diskblksPerPage int64 + num, err := C.swapctl(C.SC_GETNSWP, nil) + if err != nil { + return -1, -1, err + } + st := C.allocSwaptable(num) + _, err = C.swapctl(C.SC_LIST, unsafe.Pointer(st)) + if err != nil { + C.freeSwaptable(st) + return -1, -1, err + } + + diskblksPerPage = int64(C.sysconf(C._SC_PAGESIZE) >> C.DEV_BSHIFT) + for i := 0; i < int(num); i++ { + swapent := C.getSwapEnt(&st.swt_ent[0], C.int(i)) + tSwap += int64(swapent.ste_pages) * diskblksPerPage + fSwap += int64(swapent.ste_free) * diskblksPerPage + } + C.freeSwaptable(st) + return tSwap, fSwap, nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go b/vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go new file mode 100644 index 000000000000..3ce019dffdda --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go @@ -0,0 +1,8 @@ +// +build !linux,!windows,!solaris + +package system + +// ReadMemInfo is not supported on platforms other than linux, windows, and solaris. +func ReadMemInfo() (*MemInfo, error) { + return nil, ErrNotSupportedPlatform +} diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_windows.go b/vendor/github.com/containers/storage/pkg/system/meminfo_windows.go new file mode 100644 index 000000000000..883944a4c536 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/meminfo_windows.go @@ -0,0 +1,45 @@ +package system + +import ( + "unsafe" + + "golang.org/x/sys/windows" +) + +var ( + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + + procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx") +) + +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366589(v=vs.85).aspx +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366770(v=vs.85).aspx +type memorystatusex struct { + dwLength uint32 + dwMemoryLoad uint32 + ullTotalPhys uint64 + ullAvailPhys uint64 + ullTotalPageFile uint64 + ullAvailPageFile uint64 + ullTotalVirtual uint64 + ullAvailVirtual uint64 + ullAvailExtendedVirtual uint64 +} + +// ReadMemInfo retrieves memory statistics of the host system and returns a +// MemInfo type.
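Since every platform file exports the same signature, callers stay platform-agnostic; a hypothetical usage sketch:

```go
package main

import (
	"log"

	"github.com/containers/storage/pkg/system"
)

func main() {
	mi, err := system.ReadMemInfo()
	if err != nil {
		log.Fatal(err) // e.g. system.ErrNotSupportedPlatform
	}
	log.Printf("total=%d MiB free=%d MiB", mi.MemTotal>>20, mi.MemFree>>20)
}
```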
+func ReadMemInfo() (*MemInfo, error) { + msi := &memorystatusex{ + dwLength: 64, + } + r1, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(msi))) + if r1 == 0 { + return &MemInfo{}, nil + } + return &MemInfo{ + MemTotal: int64(msi.ullTotalPhys), + MemFree: int64(msi.ullAvailPhys), + SwapTotal: int64(msi.ullTotalPageFile), + SwapFree: int64(msi.ullAvailPageFile), + }, nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/mknod.go b/vendor/github.com/containers/storage/pkg/system/mknod.go new file mode 100644 index 000000000000..af79a6538333 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/mknod.go @@ -0,0 +1,22 @@ +// +build !windows + +package system + +import ( + "golang.org/x/sys/unix" +) + +// Mknod creates a filesystem node (file, device special file or named pipe) named path +// with attributes specified by mode and dev. +func Mknod(path string, mode uint32, dev int) error { + return unix.Mknod(path, mode, dev) +} + +// Mkdev is used to build the value of linux devices (in /dev/) which specifies major +// and minor number of the newly created device special file. +// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. +// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, +// then the top 12 bits of the minor. +func Mkdev(major int64, minor int64) uint32 { + return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)) +} diff --git a/vendor/github.com/containers/storage/pkg/system/mknod_windows.go b/vendor/github.com/containers/storage/pkg/system/mknod_windows.go new file mode 100644 index 000000000000..2e863c0215b3 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/mknod_windows.go @@ -0,0 +1,13 @@ +// +build windows + +package system + +// Mknod is not implemented on Windows. +func Mknod(path string, mode uint32, dev int) error { + return ErrNotSupportedPlatform +} + +// Mkdev is not implemented on Windows. +func Mkdev(major int64, minor int64) uint32 { + panic("Mkdev not implemented on Windows.") +} diff --git a/vendor/github.com/containers/storage/pkg/system/path.go b/vendor/github.com/containers/storage/pkg/system/path.go new file mode 100644 index 000000000000..f634a6be673e --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/path.go @@ -0,0 +1,21 @@ +package system + +import "runtime" + +const defaultUnixPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + +// DefaultPathEnv is unix style list of directories to search for +// executables. Each directory is separated from the next by a colon +// ':' character . +func DefaultPathEnv(platform string) string { + if runtime.GOOS == "windows" { + if platform != runtime.GOOS && LCOWSupported() { + return defaultUnixPathEnv + } + // Deliberately empty on Windows containers on Windows as the default path will be set by + // the container. Docker has no context of what the default path should be. + return "" + } + return defaultUnixPathEnv + +} diff --git a/vendor/github.com/containers/storage/pkg/system/path_unix.go b/vendor/github.com/containers/storage/pkg/system/path_unix.go new file mode 100644 index 000000000000..f3762e69d36a --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/path_unix.go @@ -0,0 +1,9 @@ +// +build !windows + +package system + +// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter, +// is the system drive. This is a no-op on Linux. 
+func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { + return path, nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/path_windows.go b/vendor/github.com/containers/storage/pkg/system/path_windows.go new file mode 100644 index 000000000000..aab891522db8 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/path_windows.go @@ -0,0 +1,33 @@ +// +build windows + +package system + +import ( + "fmt" + "path/filepath" + "strings" +) + +// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path. +// This is used, for example, when validating a user provided path in docker cp. +// If a drive letter is supplied, it must be the system drive. The drive letter +// is always removed. Also, it translates it to OS semantics (IOW / to \). We +// need the path in this syntax so that it can ultimately be concatenated with +// a Windows long-path which doesn't support drive-letters. Examples: +// C: --> Fail +// C:\ --> \ +// a --> a +// /a --> \a +// d:\ --> Fail +func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { + if len(path) == 2 && string(path[1]) == ":" { + return "", fmt.Errorf("No relative path specified in %q", path) + } + if !filepath.IsAbs(path) || len(path) < 2 { + return filepath.FromSlash(path), nil + } + if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") { + return "", fmt.Errorf("The specified path is not on the system drive (C:)") + } + return filepath.FromSlash(path[2:]), nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/process_unix.go b/vendor/github.com/containers/storage/pkg/system/process_unix.go new file mode 100644 index 000000000000..26c8b42c1769 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/process_unix.go @@ -0,0 +1,24 @@ +// +build linux freebsd solaris darwin + +package system + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +// IsProcessAlive returns true if process with a given pid is running. +func IsProcessAlive(pid int) bool { + err := unix.Kill(pid, syscall.Signal(0)) + if err == nil || err == unix.EPERM { + return true + } + + return false +} + +// KillProcess force-stops a process. +func KillProcess(pid int) { + unix.Kill(pid, unix.SIGKILL) +} diff --git a/vendor/github.com/containers/storage/pkg/system/rm.go b/vendor/github.com/containers/storage/pkg/system/rm.go new file mode 100644 index 000000000000..fc03c3e6b6db --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/rm.go @@ -0,0 +1,80 @@ +package system + +import ( + "os" + "syscall" + "time" + + "github.com/containers/storage/pkg/mount" + "github.com/pkg/errors" +) + +// EnsureRemoveAll wraps `os.RemoveAll` to check for specific errors that can +// often be remedied. +// Only use `EnsureRemoveAll` if you really want to make every effort to remove +// a directory. +// +// Because of the way `os.Remove` (and by extension `os.RemoveAll`) works, there +// can be a race between reading directory entries and then actually attempting +// to remove everything in the directory. +// These types of errors do not need to be returned since it's ok for the dir to +// be gone we can just retry the remove operation. 
+// +// This should not return a `os.ErrNotExist` kind of error under any circumstances +func EnsureRemoveAll(dir string) error { + notExistErr := make(map[string]bool) + + // track retries + exitOnErr := make(map[string]int) + maxRetry := 5 + + // Attempt to unmount anything beneath this dir first + mount.RecursiveUnmount(dir) + + for { + err := os.RemoveAll(dir) + if err == nil { + return err + } + + pe, ok := err.(*os.PathError) + if !ok { + return err + } + + if os.IsNotExist(err) { + if notExistErr[pe.Path] { + return err + } + notExistErr[pe.Path] = true + + // There is a race where some subdir can be removed but after the parent + // dir entries have been read. + // So the path could be from `os.Remove(subdir)` + // If the reported non-existent path is not the passed in `dir` we + // should just retry, but otherwise return with no error. + if pe.Path == dir { + return nil + } + continue + } + + if pe.Err != syscall.EBUSY { + return err + } + + if mounted, _ := mount.Mounted(pe.Path); mounted { + if e := mount.Unmount(pe.Path); e != nil { + if mounted, _ := mount.Mounted(pe.Path); mounted { + return errors.Wrapf(e, "error while removing %s", dir) + } + } + } + + if exitOnErr[pe.Path] == maxRetry { + return err + } + exitOnErr[pe.Path]++ + time.Sleep(100 * time.Millisecond) + } +} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_darwin.go b/vendor/github.com/containers/storage/pkg/system/stat_darwin.go new file mode 100644 index 000000000000..715f05b9387f --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/stat_darwin.go @@ -0,0 +1,13 @@ +package system + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtimespec}, nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_freebsd.go b/vendor/github.com/containers/storage/pkg/system/stat_freebsd.go new file mode 100644 index 000000000000..715f05b9387f --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/stat_freebsd.go @@ -0,0 +1,13 @@ +package system + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtimespec}, nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_linux.go b/vendor/github.com/containers/storage/pkg/system/stat_linux.go new file mode 100644 index 000000000000..1939f95181a0 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/stat_linux.go @@ -0,0 +1,19 @@ +package system + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: s.Mode, + uid: s.Uid, + gid: s.Gid, + rdev: s.Rdev, + mtim: s.Mtim}, nil +} + +// FromStatT converts a syscall.Stat_t type to a system.Stat_t type +// This is exposed on Linux as pkg/archive/changes uses it. 
+func FromStatT(s *syscall.Stat_t) (*StatT, error) { + return fromStatT(s) +} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_openbsd.go b/vendor/github.com/containers/storage/pkg/system/stat_openbsd.go new file mode 100644 index 000000000000..b607dea946f8 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/stat_openbsd.go @@ -0,0 +1,13 @@ +package system + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtim}, nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_solaris.go b/vendor/github.com/containers/storage/pkg/system/stat_solaris.go new file mode 100644 index 000000000000..b607dea946f8 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/stat_solaris.go @@ -0,0 +1,13 @@ +package system + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtim}, nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_unix.go b/vendor/github.com/containers/storage/pkg/system/stat_unix.go new file mode 100644 index 000000000000..91c7d121cc7f --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/stat_unix.go @@ -0,0 +1,60 @@ +// +build !windows + +package system + +import ( + "syscall" +) + +// StatT type contains status of a file. It contains metadata +// like permission, owner, group, size, etc about a file. +type StatT struct { + mode uint32 + uid uint32 + gid uint32 + rdev uint64 + size int64 + mtim syscall.Timespec +} + +// Mode returns file's permission mode. +func (s StatT) Mode() uint32 { + return s.mode +} + +// UID returns file's user id of owner. +func (s StatT) UID() uint32 { + return s.uid +} + +// GID returns file's group id of owner. +func (s StatT) GID() uint32 { + return s.gid +} + +// Rdev returns file's device ID (if it's special file). +func (s StatT) Rdev() uint64 { + return s.rdev +} + +// Size returns file's size. +func (s StatT) Size() int64 { + return s.size +} + +// Mtim returns file's last modification time. +func (s StatT) Mtim() syscall.Timespec { + return s.mtim +} + +// Stat takes a path to a file and returns +// a system.StatT type pertaining to that file. +// +// Throws an error if the file does not exist +func Stat(path string) (*StatT, error) { + s := &syscall.Stat_t{} + if err := syscall.Stat(path, s); err != nil { + return nil, err + } + return fromStatT(s) +} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_windows.go b/vendor/github.com/containers/storage/pkg/system/stat_windows.go new file mode 100644 index 000000000000..6c63972682a4 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/stat_windows.go @@ -0,0 +1,49 @@ +package system + +import ( + "os" + "time" +) + +// StatT type contains status of a file. It contains metadata +// like permission, size, etc about a file. +type StatT struct { + mode os.FileMode + size int64 + mtim time.Time +} + +// Size returns file's size. +func (s StatT) Size() int64 { + return s.size +} + +// Mode returns file's permission mode. +func (s StatT) Mode() os.FileMode { + return os.FileMode(s.mode) +} + +// Mtim returns file's last modification time. 
+func (s StatT) Mtim() time.Time { + return time.Time(s.mtim) +} + +// Stat takes a path to a file and returns +// a system.StatT type pertaining to that file. +// +// Throws an error if the file does not exist +func Stat(path string) (*StatT, error) { + fi, err := os.Stat(path) + if err != nil { + return nil, err + } + return fromStatT(&fi) +} + +// fromStatT converts a os.FileInfo type to a system.StatT type +func fromStatT(fi *os.FileInfo) (*StatT, error) { + return &StatT{ + size: (*fi).Size(), + mode: (*fi).Mode(), + mtim: (*fi).ModTime()}, nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/syscall_unix.go b/vendor/github.com/containers/storage/pkg/system/syscall_unix.go new file mode 100644 index 000000000000..49dbdd378173 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/syscall_unix.go @@ -0,0 +1,17 @@ +// +build linux freebsd + +package system + +import "golang.org/x/sys/unix" + +// Unmount is a platform-specific helper function to call +// the unmount syscall. +func Unmount(dest string) error { + return unix.Unmount(dest, 0) +} + +// CommandLineToArgv should not be used on Unix. +// It simply returns commandLine in the only element in the returned array. +func CommandLineToArgv(commandLine string) ([]string, error) { + return []string{commandLine}, nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/syscall_windows.go b/vendor/github.com/containers/storage/pkg/system/syscall_windows.go new file mode 100644 index 000000000000..23e9b207c756 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/syscall_windows.go @@ -0,0 +1,122 @@ +package system + +import ( + "unsafe" + + "github.com/sirupsen/logrus" + "golang.org/x/sys/windows" +) + +var ( + ntuserApiset = windows.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0") + procGetVersionExW = modkernel32.NewProc("GetVersionExW") + procGetProductInfo = modkernel32.NewProc("GetProductInfo") +) + +// OSVersion is a wrapper for Windows version information +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx +type OSVersion struct { + Version uint32 + MajorVersion uint8 + MinorVersion uint8 + Build uint16 +} + +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724833(v=vs.85).aspx +type osVersionInfoEx struct { + OSVersionInfoSize uint32 + MajorVersion uint32 + MinorVersion uint32 + BuildNumber uint32 + PlatformID uint32 + CSDVersion [128]uint16 + ServicePackMajor uint16 + ServicePackMinor uint16 + SuiteMask uint16 + ProductType byte + Reserve byte +} + +// GetOSVersion gets the operating system version on Windows. Note that +// docker.exe must be manifested to get the correct version information. +func GetOSVersion() OSVersion { + var err error + osv := OSVersion{} + osv.Version, err = windows.GetVersion() + if err != nil { + // GetVersion never fails. + panic(err) + } + osv.MajorVersion = uint8(osv.Version & 0xFF) + osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF) + osv.Build = uint16(osv.Version >> 16) + return osv +} + +// IsWindowsClient returns true if the SKU is client +// @engine maintainers - this function should not be removed or modified as it +// is used to enforce licensing restrictions on Windows. 
+func IsWindowsClient() bool { + osviex := &osVersionInfoEx{OSVersionInfoSize: 284} + r1, _, err := procGetVersionExW.Call(uintptr(unsafe.Pointer(osviex))) + if r1 == 0 { + logrus.Warnf("GetVersionExW failed - assuming server SKU: %v", err) + return false + } + const verNTWorkstation = 0x00000001 + return osviex.ProductType == verNTWorkstation +} + +// IsIoTCore returns true if the currently running image is based off of +// Windows 10 IoT Core. +// @engine maintainers - this function should not be removed or modified as it +// is used to enforce licensing restrictions on Windows. +func IsIoTCore() bool { + var returnedProductType uint32 + r1, _, err := procGetProductInfo.Call(6, 1, 0, 0, uintptr(unsafe.Pointer(&returnedProductType))) + if r1 == 0 { + logrus.Warnf("GetProductInfo failed - assuming this is not IoT: %v", err) + return false + } + const productIoTUAP = 0x0000007B + const productIoTUAPCommercial = 0x00000083 + return returnedProductType == productIoTUAP || returnedProductType == productIoTUAPCommercial +} + +// Unmount is a platform-specific helper function to call +// the unmount syscall. Not supported on Windows +func Unmount(dest string) error { + return nil +} + +// CommandLineToArgv wraps the Windows syscall to turn a commandline into an argument array. +func CommandLineToArgv(commandLine string) ([]string, error) { + var argc int32 + + argsPtr, err := windows.UTF16PtrFromString(commandLine) + if err != nil { + return nil, err + } + + argv, err := windows.CommandLineToArgv(argsPtr, &argc) + if err != nil { + return nil, err + } + defer windows.LocalFree(windows.Handle(uintptr(unsafe.Pointer(argv)))) + + newArgs := make([]string, argc) + for i, v := range (*argv)[:argc] { + newArgs[i] = string(windows.UTF16ToString((*v)[:])) + } + + return newArgs, nil +} + +// HasWin32KSupport determines whether containers that depend on win32k can +// run on this machine. Win32k is the driver used to implement windowing. +func HasWin32KSupport() bool { + // For now, check for ntuser API support on the host. In the future, a host + // may support win32k in containers even if the host does not support ntuser + // APIs. + return ntuserApiset.Load() == nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/umask.go b/vendor/github.com/containers/storage/pkg/system/umask.go new file mode 100644 index 000000000000..5a10eda5afb0 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/umask.go @@ -0,0 +1,13 @@ +// +build !windows + +package system + +import ( + "golang.org/x/sys/unix" +) + +// Umask sets current process's file mode creation mask to newmask +// and returns oldmask. +func Umask(newmask int) (oldmask int, err error) { + return unix.Umask(newmask), nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/umask_windows.go b/vendor/github.com/containers/storage/pkg/system/umask_windows.go new file mode 100644 index 000000000000..13f1de1769c7 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/umask_windows.go @@ -0,0 +1,9 @@ +// +build windows + +package system + +// Umask is not supported on the windows platform. 
+func Umask(newmask int) (oldmask int, err error) {
+	// should not be called on the CLI code path
+	return 0, ErrNotSupportedPlatform
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go b/vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go
new file mode 100644
index 000000000000..6a77524376db
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go
@@ -0,0 +1,24 @@
+package system
+
+import (
+	"syscall"
+	"unsafe"
+
+	"golang.org/x/sys/unix"
+)
+
+// LUtimesNano is used to change access and modification time of the specified path.
+// It is used for symlinks because unix.UtimesNano doesn't currently support a NOFOLLOW flag.
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+	var _path *byte
+	_path, err := unix.BytePtrFromString(path)
+	if err != nil {
+		return err
+	}
+
+	if _, _, err := unix.Syscall(unix.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != unix.ENOSYS {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/utimes_linux.go b/vendor/github.com/containers/storage/pkg/system/utimes_linux.go
new file mode 100644
index 000000000000..edc588a63f36
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/utimes_linux.go
@@ -0,0 +1,25 @@
+package system
+
+import (
+	"syscall"
+	"unsafe"
+
+	"golang.org/x/sys/unix"
+)
+
+// LUtimesNano is used to change access and modification time of the specified path.
+// It is used for symlinks because unix.UtimesNano doesn't currently support a NOFOLLOW flag.
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+	atFdCwd := unix.AT_FDCWD
+
+	var _path *byte
+	_path, err := unix.BytePtrFromString(path)
+	if err != nil {
+		return err
+	}
+	if _, _, err := unix.Syscall6(unix.SYS_UTIMENSAT, uintptr(atFdCwd), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), unix.AT_SYMLINK_NOFOLLOW, 0, 0); err != 0 && err != unix.ENOSYS {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/utimes_unsupported.go b/vendor/github.com/containers/storage/pkg/system/utimes_unsupported.go
new file mode 100644
index 000000000000..139714544d02
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/utimes_unsupported.go
@@ -0,0 +1,10 @@
+// +build !linux,!freebsd
+
+package system
+
+import "syscall"
+
+// LUtimesNano is only supported on linux and freebsd.
+func LUtimesNano(path string, ts []syscall.Timespec) error {
+	return ErrNotSupportedPlatform
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go b/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go
new file mode 100644
index 000000000000..98b111be426e
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go
@@ -0,0 +1,29 @@
+package system
+
+import "golang.org/x/sys/unix"
+
+// Lgetxattr retrieves the value of the extended attribute identified by attr
+// and associated with the given path in the file system.
+// It returns a nil slice and nil error if the xattr is not set.
+func Lgetxattr(path string, attr string) ([]byte, error) { + dest := make([]byte, 128) + sz, errno := unix.Lgetxattr(path, attr, dest) + if errno == unix.ENODATA { + return nil, nil + } + if errno == unix.ERANGE { + dest = make([]byte, sz) + sz, errno = unix.Lgetxattr(path, attr, dest) + } + if errno != nil { + return nil, errno + } + + return dest[:sz], nil +} + +// Lsetxattr sets the value of the extended attribute identified by attr +// and associated with the given path in the file system. +func Lsetxattr(path string, attr string, data []byte, flags int) error { + return unix.Lsetxattr(path, attr, data, flags) +} diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go b/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go new file mode 100644 index 000000000000..0114f2227cf0 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go @@ -0,0 +1,13 @@ +// +build !linux + +package system + +// Lgetxattr is not supported on platforms other than linux. +func Lgetxattr(path string, attr string) ([]byte, error) { + return nil, ErrNotSupportedPlatform +} + +// Lsetxattr is not supported on platforms other than linux. +func Lsetxattr(path string, attr string, data []byte, flags int) error { + return ErrNotSupportedPlatform +} diff --git a/vendor/github.com/coreos/bbolt/.gitignore b/vendor/github.com/coreos/bbolt/.gitignore new file mode 100644 index 000000000000..c2a8cfa788c0 --- /dev/null +++ b/vendor/github.com/coreos/bbolt/.gitignore @@ -0,0 +1,5 @@ +*.prof +*.test +*.swp +/bin/ +cmd/bolt/bolt diff --git a/vendor/github.com/boltdb/bolt/LICENSE b/vendor/github.com/coreos/bbolt/LICENSE similarity index 100% rename from vendor/github.com/boltdb/bolt/LICENSE rename to vendor/github.com/coreos/bbolt/LICENSE diff --git a/vendor/github.com/coreos/bbolt/Makefile b/vendor/github.com/coreos/bbolt/Makefile new file mode 100644 index 000000000000..43b94f3bdfe8 --- /dev/null +++ b/vendor/github.com/coreos/bbolt/Makefile @@ -0,0 +1,30 @@ +BRANCH=`git rev-parse --abbrev-ref HEAD` +COMMIT=`git rev-parse --short HEAD` +GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)" + +default: build + +race: + @go test -v -race -test.run="TestSimulate_(100op|1000op)" + +fmt: + !(gofmt -l -s -d $(shell find . -name \*.go) | grep '[a-z]') + +# go get honnef.co/go/tools/simple +gosimple: + gosimple ./... + +# go get honnef.co/go/tools/unused +unused: + unused ./... + +# go get github.com/kisielk/errcheck +errcheck: + @errcheck -ignorepkg=bytes -ignore=os:Remove github.com/coreos/bbolt + +test: + go test -timeout 20m -v -coverprofile cover.out -covermode atomic + # Note: gets "program not an importable package" in out of path builds + go test -v ./cmd/bolt + +.PHONY: race fmt errcheck test gosimple unused diff --git a/vendor/github.com/coreos/bbolt/README.md b/vendor/github.com/coreos/bbolt/README.md new file mode 100644 index 000000000000..015f0efbe845 --- /dev/null +++ b/vendor/github.com/coreos/bbolt/README.md @@ -0,0 +1,928 @@ +bbolt +==== + +[![Go Report Card](https://goreportcard.com/badge/github.com/coreos/bbolt?style=flat-square)](https://goreportcard.com/report/github.com/coreos/bbolt) +[![Coverage](https://codecov.io/gh/coreos/bbolt/branch/master/graph/badge.svg)](https://codecov.io/gh/coreos/bbolt) +[![Godoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/github.com/coreos/bbolt) + +bbolt is a fork of [Ben Johnson's][gh_ben] [Bolt][bolt] key/value +store. 
The purpose of this fork is to provide the Go community with an active +maintenance and development target for Bolt; the goal is improved reliability +and stability. bbolt includes bug fixes, performance enhancements, and features +not found in Bolt while preserving backwards compatibility with the Bolt API. + +Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas] +[LMDB project][lmdb]. The goal of the project is to provide a simple, +fast, and reliable database for projects that don't require a full database +server such as Postgres or MySQL. + +Since Bolt is meant to be used as such a low-level piece of functionality, +simplicity is key. The API will be small and only focus on getting values +and setting values. That's it. + +[gh_ben]: https://github.com/benbjohnson +[bolt]: https://github.com/boltdb/bolt +[hyc_symas]: https://twitter.com/hyc_symas +[lmdb]: http://symas.com/mdb/ + +## Project Status + +Bolt is stable, the API is fixed, and the file format is fixed. Full unit +test coverage and randomized black box testing are used to ensure database +consistency and thread safety. Bolt is currently used in high-load production +environments serving databases as large as 1TB. Many companies such as +Shopify and Heroku use Bolt-backed services every day. + +## Table of Contents + +- [Getting Started](#getting-started) + - [Installing](#installing) + - [Opening a database](#opening-a-database) + - [Transactions](#transactions) + - [Read-write transactions](#read-write-transactions) + - [Read-only transactions](#read-only-transactions) + - [Batch read-write transactions](#batch-read-write-transactions) + - [Managing transactions manually](#managing-transactions-manually) + - [Using buckets](#using-buckets) + - [Using key/value pairs](#using-keyvalue-pairs) + - [Autoincrementing integer for the bucket](#autoincrementing-integer-for-the-bucket) + - [Iterating over keys](#iterating-over-keys) + - [Prefix scans](#prefix-scans) + - [Range scans](#range-scans) + - [ForEach()](#foreach) + - [Nested buckets](#nested-buckets) + - [Database backups](#database-backups) + - [Statistics](#statistics) + - [Read-Only Mode](#read-only-mode) + - [Mobile Use (iOS/Android)](#mobile-use-iosandroid) +- [Resources](#resources) +- [Comparison with other databases](#comparison-with-other-databases) + - [Postgres, MySQL, & other relational databases](#postgres-mysql--other-relational-databases) + - [LevelDB, RocksDB](#leveldb-rocksdb) + - [LMDB](#lmdb) +- [Caveats & Limitations](#caveats--limitations) +- [Reading the Source](#reading-the-source) +- [Other Projects Using Bolt](#other-projects-using-bolt) + +## Getting Started + +### Installing + +To start using Bolt, install Go and run `go get`: + +```sh +$ go get github.com/coreos/bbolt/... +``` + +This will retrieve the library and install the `bolt` command line utility into +your `$GOBIN` path. + + +### Opening a database + +The top-level object in Bolt is a `DB`. It is represented as a single file on +your disk and represents a consistent snapshot of your data. + +To open your database, simply use the `bolt.Open()` function: + +```go +package main + +import ( + "log" + + bolt "github.com/coreos/bbolt" +) + +func main() { + // Open the my.db data file in your current directory. + // It will be created if it doesn't exist. + db, err := bolt.Open("my.db", 0600, nil) + if err != nil { + log.Fatal(err) + } + defer db.Close() + + ... 
+} +``` + +Please note that Bolt obtains a file lock on the data file so multiple processes +cannot open the same database at the same time. Opening an already open Bolt +database will cause it to hang until the other process closes it. To prevent +an indefinite wait you can pass a timeout option to the `Open()` function: + +```go +db, err := bolt.Open("my.db", 0600, &bolt.Options{Timeout: 1 * time.Second}) +``` + + +### Transactions + +Bolt allows only one read-write transaction at a time but allows as many +read-only transactions as you want at a time. Each transaction has a consistent +view of the data as it existed when the transaction started. + +Individual transactions and all objects created from them (e.g. buckets, keys) +are not thread safe. To work with data in multiple goroutines you must start +a transaction for each one or use locking to ensure only one goroutine accesses +a transaction at a time. Creating transaction from the `DB` is thread safe. + +Read-only transactions and read-write transactions should not depend on one +another and generally shouldn't be opened simultaneously in the same goroutine. +This can cause a deadlock as the read-write transaction needs to periodically +re-map the data file but it cannot do so while a read-only transaction is open. + + +#### Read-write transactions + +To start a read-write transaction, you can use the `DB.Update()` function: + +```go +err := db.Update(func(tx *bolt.Tx) error { + ... + return nil +}) +``` + +Inside the closure, you have a consistent view of the database. You commit the +transaction by returning `nil` at the end. You can also rollback the transaction +at any point by returning an error. All database operations are allowed inside +a read-write transaction. + +Always check the return error as it will report any disk failures that can cause +your transaction to not complete. If you return an error within your closure +it will be passed through. + + +#### Read-only transactions + +To start a read-only transaction, you can use the `DB.View()` function: + +```go +err := db.View(func(tx *bolt.Tx) error { + ... + return nil +}) +``` + +You also get a consistent view of the database within this closure, however, +no mutating operations are allowed within a read-only transaction. You can only +retrieve buckets, retrieve values, and copy the database within a read-only +transaction. + + +#### Batch read-write transactions + +Each `DB.Update()` waits for disk to commit the writes. This overhead +can be minimized by combining multiple updates with the `DB.Batch()` +function: + +```go +err := db.Batch(func(tx *bolt.Tx) error { + ... + return nil +}) +``` + +Concurrent Batch calls are opportunistically combined into larger +transactions. Batch is only useful when there are multiple goroutines +calling it. + +The trade-off is that `Batch` can call the given +function multiple times, if parts of the transaction fail. The +function must be idempotent and side effects must take effect only +after a successful return from `DB.Batch()`. + +For example: don't display messages from inside the function, instead +set variables in the enclosing scope: + +```go +var id uint64 +err := db.Batch(func(tx *bolt.Tx) error { + // Find last key in bucket, decode as bigendian uint64, increment + // by one, encode back to []byte, and add new key. + ... + id = newValue + return nil +}) +if err != nil { + return ... 
+} +fmt.Println("Allocated ID %d", id) +``` + + +#### Managing transactions manually + +The `DB.View()` and `DB.Update()` functions are wrappers around the `DB.Begin()` +function. These helper functions will start the transaction, execute a function, +and then safely close your transaction if an error is returned. This is the +recommended way to use Bolt transactions. + +However, sometimes you may want to manually start and end your transactions. +You can use the `DB.Begin()` function directly but **please** be sure to close +the transaction. + +```go +// Start a writable transaction. +tx, err := db.Begin(true) +if err != nil { + return err +} +defer tx.Rollback() + +// Use the transaction... +_, err := tx.CreateBucket([]byte("MyBucket")) +if err != nil { + return err +} + +// Commit the transaction and check for error. +if err := tx.Commit(); err != nil { + return err +} +``` + +The first argument to `DB.Begin()` is a boolean stating if the transaction +should be writable. + + +### Using buckets + +Buckets are collections of key/value pairs within the database. All keys in a +bucket must be unique. You can create a bucket using the `DB.CreateBucket()` +function: + +```go +db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte("MyBucket")) + if err != nil { + return fmt.Errorf("create bucket: %s", err) + } + return nil +}) +``` + +You can also create a bucket only if it doesn't exist by using the +`Tx.CreateBucketIfNotExists()` function. It's a common pattern to call this +function for all your top-level buckets after you open your database so you can +guarantee that they exist for future transactions. + +To delete a bucket, simply call the `Tx.DeleteBucket()` function. + + +### Using key/value pairs + +To save a key/value pair to a bucket, use the `Bucket.Put()` function: + +```go +db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("MyBucket")) + err := b.Put([]byte("answer"), []byte("42")) + return err +}) +``` + +This will set the value of the `"answer"` key to `"42"` in the `MyBucket` +bucket. To retrieve this value, we can use the `Bucket.Get()` function: + +```go +db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("MyBucket")) + v := b.Get([]byte("answer")) + fmt.Printf("The answer is: %s\n", v) + return nil +}) +``` + +The `Get()` function does not return an error because its operation is +guaranteed to work (unless there is some kind of system failure). If the key +exists then it will return its byte slice value. If it doesn't exist then it +will return `nil`. It's important to note that you can have a zero-length value +set to a key which is different than the key not existing. + +Use the `Bucket.Delete()` function to delete a key from the bucket. + +Please note that values returned from `Get()` are only valid while the +transaction is open. If you need to use a value outside of the transaction +then you must use `copy()` to copy it to another byte slice. + + +### Autoincrementing integer for the bucket +By using the `NextSequence()` function, you can let Bolt determine a sequence +which can be used as the unique identifier for your key/value pairs. See the +example below. + +```go +// CreateUser saves u to the store. The new user ID is set on u once the data is persisted. +func (s *Store) CreateUser(u *User) error { + return s.db.Update(func(tx *bolt.Tx) error { + // Retrieve the users bucket. + // This should be created when the DB is first opened. + b := tx.Bucket([]byte("users")) + + // Generate ID for the user. 
+ // This returns an error only if the Tx is closed or not writeable. + // That can't happen in an Update() call so I ignore the error check. + id, _ := b.NextSequence() + u.ID = int(id) + + // Marshal user data into bytes. + buf, err := json.Marshal(u) + if err != nil { + return err + } + + // Persist bytes to users bucket. + return b.Put(itob(u.ID), buf) + }) +} + +// itob returns an 8-byte big endian representation of v. +func itob(v int) []byte { + b := make([]byte, 8) + binary.BigEndian.PutUint64(b, uint64(v)) + return b +} + +type User struct { + ID int + ... +} +``` + +### Iterating over keys + +Bolt stores its keys in byte-sorted order within a bucket. This makes sequential +iteration over these keys extremely fast. To iterate over keys we'll use a +`Cursor`: + +```go +db.View(func(tx *bolt.Tx) error { + // Assume bucket exists and has keys + b := tx.Bucket([]byte("MyBucket")) + + c := b.Cursor() + + for k, v := c.First(); k != nil; k, v = c.Next() { + fmt.Printf("key=%s, value=%s\n", k, v) + } + + return nil +}) +``` + +The cursor allows you to move to a specific point in the list of keys and move +forward or backward through the keys one at a time. + +The following functions are available on the cursor: + +``` +First() Move to the first key. +Last() Move to the last key. +Seek() Move to a specific key. +Next() Move to the next key. +Prev() Move to the previous key. +``` + +Each of those functions has a return signature of `(key []byte, value []byte)`. +When you have iterated to the end of the cursor then `Next()` will return a +`nil` key. You must seek to a position using `First()`, `Last()`, or `Seek()` +before calling `Next()` or `Prev()`. If you do not seek to a position then +these functions will return a `nil` key. + +During iteration, if the key is non-`nil` but the value is `nil`, that means +the key refers to a bucket rather than a value. Use `Bucket.Bucket()` to +access the sub-bucket. + + +#### Prefix scans + +To iterate over a key prefix, you can combine `Seek()` and `bytes.HasPrefix()`: + +```go +db.View(func(tx *bolt.Tx) error { + // Assume bucket exists and has keys + c := tx.Bucket([]byte("MyBucket")).Cursor() + + prefix := []byte("1234") + for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() { + fmt.Printf("key=%s, value=%s\n", k, v) + } + + return nil +}) +``` + +#### Range scans + +Another common use case is scanning over a range such as a time range. If you +use a sortable time encoding such as RFC3339 then you can query a specific +date range like this: + +```go +db.View(func(tx *bolt.Tx) error { + // Assume our events bucket exists and has RFC3339 encoded time keys. + c := tx.Bucket([]byte("Events")).Cursor() + + // Our time range spans the 90's decade. + min := []byte("1990-01-01T00:00:00Z") + max := []byte("2000-01-01T00:00:00Z") + + // Iterate over the 90's. + for k, v := c.Seek(min); k != nil && bytes.Compare(k, max) <= 0; k, v = c.Next() { + fmt.Printf("%s: %s\n", k, v) + } + + return nil +}) +``` + +Note that, while RFC3339 is sortable, the Golang implementation of RFC3339Nano does not use a fixed number of digits after the decimal point and is therefore not sortable. 
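+
+For example, formatting in UTC with second precision gives fixed-width keys
+whose byte order matches chronological order. A minimal sketch (the helper
+name `timeKey` is illustrative, not part of Bolt):
+
+```go
+import "time"
+
+// timeKey returns a fixed-width, byte-sortable key for t. RFC3339 with
+// second precision in UTC always renders the same number of bytes, so
+// lexicographic order over keys is the same as chronological order.
+func timeKey(t time.Time) []byte {
+	return []byte(t.UTC().Format(time.RFC3339))
+}
+```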
+ + +#### ForEach() + +You can also use the function `ForEach()` if you know you'll be iterating over +all the keys in a bucket: + +```go +db.View(func(tx *bolt.Tx) error { + // Assume bucket exists and has keys + b := tx.Bucket([]byte("MyBucket")) + + b.ForEach(func(k, v []byte) error { + fmt.Printf("key=%s, value=%s\n", k, v) + return nil + }) + return nil +}) +``` + +Please note that keys and values in `ForEach()` are only valid while +the transaction is open. If you need to use a key or value outside of +the transaction, you must use `copy()` to copy it to another byte +slice. + +### Nested buckets + +You can also store a bucket in a key to create nested buckets. The API is the +same as the bucket management API on the `DB` object: + +```go +func (*Bucket) CreateBucket(key []byte) (*Bucket, error) +func (*Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) +func (*Bucket) DeleteBucket(key []byte) error +``` + +Say you had a multi-tenant application where the root level bucket was the account bucket. Inside of this bucket was a sequence of accounts which themselves are buckets. And inside the sequence bucket you could have many buckets pertaining to the Account itself (Users, Notes, etc) isolating the information into logical groupings. + +```go + +// createUser creates a new user in the given account. +func createUser(accountID int, u *User) error { + // Start the transaction. + tx, err := db.Begin(true) + if err != nil { + return err + } + defer tx.Rollback() + + // Retrieve the root bucket for the account. + // Assume this has already been created when the account was set up. + root := tx.Bucket([]byte(strconv.FormatUint(accountID, 10))) + + // Setup the users bucket. + bkt, err := root.CreateBucketIfNotExists([]byte("USERS")) + if err != nil { + return err + } + + // Generate an ID for the new user. + userID, err := bkt.NextSequence() + if err != nil { + return err + } + u.ID = userID + + // Marshal and save the encoded user. + if buf, err := json.Marshal(u); err != nil { + return err + } else if err := bkt.Put([]byte(strconv.FormatUint(u.ID, 10)), buf); err != nil { + return err + } + + // Commit the transaction. + if err := tx.Commit(); err != nil { + return err + } + + return nil +} + +``` + + + + +### Database backups + +Bolt is a single file so it's easy to backup. You can use the `Tx.WriteTo()` +function to write a consistent view of the database to a writer. If you call +this from a read-only transaction, it will perform a hot backup and not block +your other database reads and writes. + +By default, it will use a regular file handle which will utilize the operating +system's page cache. See the [`Tx`](https://godoc.org/github.com/coreos/bbolt#Tx) +documentation for information about optimizing for larger-than-RAM datasets. + +One common use case is to backup over HTTP so you can use tools like `cURL` to +do database backups: + +```go +func BackupHandleFunc(w http.ResponseWriter, req *http.Request) { + err := db.View(func(tx *bolt.Tx) error { + w.Header().Set("Content-Type", "application/octet-stream") + w.Header().Set("Content-Disposition", `attachment; filename="my.db"`) + w.Header().Set("Content-Length", strconv.Itoa(int(tx.Size()))) + _, err := tx.WriteTo(w) + return err + }) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } +} +``` + +Then you can backup using this command: + +```sh +$ curl http://localhost/backup > my.db +``` + +Or you can open your browser to `http://localhost/backup` and it will download +automatically. 
+ +If you want to backup to another file you can use the `Tx.CopyFile()` helper +function. + + +### Statistics + +The database keeps a running count of many of the internal operations it +performs so you can better understand what's going on. By grabbing a snapshot +of these stats at two points in time we can see what operations were performed +in that time range. + +For example, we could start a goroutine to log stats every 10 seconds: + +```go +go func() { + // Grab the initial stats. + prev := db.Stats() + + for { + // Wait for 10s. + time.Sleep(10 * time.Second) + + // Grab the current stats and diff them. + stats := db.Stats() + diff := stats.Sub(&prev) + + // Encode stats to JSON and print to STDERR. + json.NewEncoder(os.Stderr).Encode(diff) + + // Save stats for the next loop. + prev = stats + } +}() +``` + +It's also useful to pipe these stats to a service such as statsd for monitoring +or to provide an HTTP endpoint that will perform a fixed-length sample. + + +### Read-Only Mode + +Sometimes it is useful to create a shared, read-only Bolt database. To this, +set the `Options.ReadOnly` flag when opening your database. Read-only mode +uses a shared lock to allow multiple processes to read from the database but +it will block any processes from opening the database in read-write mode. + +```go +db, err := bolt.Open("my.db", 0666, &bolt.Options{ReadOnly: true}) +if err != nil { + log.Fatal(err) +} +``` + +### Mobile Use (iOS/Android) + +Bolt is able to run on mobile devices by leveraging the binding feature of the +[gomobile](https://github.com/golang/mobile) tool. Create a struct that will +contain your database logic and a reference to a `*bolt.DB` with a initializing +constructor that takes in a filepath where the database file will be stored. +Neither Android nor iOS require extra permissions or cleanup from using this method. + +```go +func NewBoltDB(filepath string) *BoltDB { + db, err := bolt.Open(filepath+"/demo.db", 0600, nil) + if err != nil { + log.Fatal(err) + } + + return &BoltDB{db} +} + +type BoltDB struct { + db *bolt.DB + ... +} + +func (b *BoltDB) Path() string { + return b.db.Path() +} + +func (b *BoltDB) Close() { + b.db.Close() +} +``` + +Database logic should be defined as methods on this wrapper struct. + +To initialize this struct from the native language (both platforms now sync +their local storage to the cloud. 
These snippets disable that functionality for the +database file): + +#### Android + +```java +String path; +if (android.os.Build.VERSION.SDK_INT >=android.os.Build.VERSION_CODES.LOLLIPOP){ + path = getNoBackupFilesDir().getAbsolutePath(); +} else{ + path = getFilesDir().getAbsolutePath(); +} +Boltmobiledemo.BoltDB boltDB = Boltmobiledemo.NewBoltDB(path) +``` + +#### iOS + +```objc +- (void)demo { + NSString* path = [NSSearchPathForDirectoriesInDomains(NSLibraryDirectory, + NSUserDomainMask, + YES) objectAtIndex:0]; + GoBoltmobiledemoBoltDB * demo = GoBoltmobiledemoNewBoltDB(path); + [self addSkipBackupAttributeToItemAtPath:demo.path]; + //Some DB Logic would go here + [demo close]; +} + +- (BOOL)addSkipBackupAttributeToItemAtPath:(NSString *) filePathString +{ + NSURL* URL= [NSURL fileURLWithPath: filePathString]; + assert([[NSFileManager defaultManager] fileExistsAtPath: [URL path]]); + + NSError *error = nil; + BOOL success = [URL setResourceValue: [NSNumber numberWithBool: YES] + forKey: NSURLIsExcludedFromBackupKey error: &error]; + if(!success){ + NSLog(@"Error excluding %@ from backup %@", [URL lastPathComponent], error); + } + return success; +} + +``` + +## Resources + +For more information on getting started with Bolt, check out the following articles: + +* [Intro to BoltDB: Painless Performant Persistence](http://npf.io/2014/07/intro-to-boltdb-painless-performant-persistence/) by [Nate Finch](https://github.com/natefinch). +* [Bolt -- an embedded key/value database for Go](https://www.progville.com/go/bolt-embedded-db-golang/) by Progville + + +## Comparison with other databases + +### Postgres, MySQL, & other relational databases + +Relational databases structure data into rows and are only accessible through +the use of SQL. This approach provides flexibility in how you store and query +your data but also incurs overhead in parsing and planning SQL statements. Bolt +accesses all data by a byte slice key. This makes Bolt fast to read and write +data by key but provides no built-in support for joining values together. + +Most relational databases (with the exception of SQLite) are standalone servers +that run separately from your application. This gives your systems +flexibility to connect multiple application servers to a single database +server but also adds overhead in serializing and transporting data over the +network. Bolt runs as a library included in your application so all data access +has to go through your application's process. This brings data closer to your +application but limits multi-process access to the data. + + +### LevelDB, RocksDB + +LevelDB and its derivatives (RocksDB, HyperLevelDB) are similar to Bolt in that +they are libraries bundled into the application, however, their underlying +structure is a log-structured merge-tree (LSM tree). An LSM tree optimizes +random writes by using a write ahead log and multi-tiered, sorted files called +SSTables. Bolt uses a B+tree internally and only a single file. Both approaches +have trade-offs. + +If you require a high random write throughput (>10,000 w/sec) or you need to use +spinning disks then LevelDB could be a good choice. If your application is +read-heavy or does a lot of range scans then Bolt could be a good choice. + +One other important consideration is that LevelDB does not have transactions. +It supports batch writing of key/values pairs and it supports read snapshots +but it will not give you the ability to do a compare-and-swap operation safely. +Bolt supports fully serializable ACID transactions. 
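+
+For instance, a compare-and-swap can be written directly inside an update
+transaction. A sketch, assuming a `MyBucket` bucket already exists (the
+`compareAndSwap` helper is illustrative, not part of the Bolt API):
+
+```go
+import (
+	"bytes"
+	"errors"
+
+	bolt "github.com/coreos/bbolt"
+)
+
+// compareAndSwap sets key to newVal only if the current value equals oldVal.
+// Bolt allows a single read-write transaction at a time, so the check and
+// the write below are serialized against every other update.
+func compareAndSwap(db *bolt.DB, key, oldVal, newVal []byte) error {
+	return db.Update(func(tx *bolt.Tx) error {
+		b := tx.Bucket([]byte("MyBucket")) // assume the bucket exists
+		if !bytes.Equal(b.Get(key), oldVal) {
+			return errors.New("value changed concurrently")
+		}
+		return b.Put(key, newVal)
+	})
+}
+```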
+ + +### LMDB + +Bolt was originally a port of LMDB so it is architecturally similar. Both use +a B+tree, have ACID semantics with fully serializable transactions, and support +lock-free MVCC using a single writer and multiple readers. + +The two projects have somewhat diverged. LMDB heavily focuses on raw performance +while Bolt has focused on simplicity and ease of use. For example, LMDB allows +several unsafe actions such as direct writes for the sake of performance. Bolt +opts to disallow actions which can leave the database in a corrupted state. The +only exception to this in Bolt is `DB.NoSync`. + +There are also a few differences in API. LMDB requires a maximum mmap size when +opening an `mdb_env` whereas Bolt will handle incremental mmap resizing +automatically. LMDB overloads the getter and setter functions with multiple +flags whereas Bolt splits these specialized cases into their own functions. + + +## Caveats & Limitations + +It's important to pick the right tool for the job and Bolt is no exception. +Here are a few things to note when evaluating and using Bolt: + +* Bolt is good for read intensive workloads. Sequential write performance is + also fast but random writes can be slow. You can use `DB.Batch()` or add a + write-ahead log to help mitigate this issue. + +* Bolt uses a B+tree internally so there can be a lot of random page access. + SSDs provide a significant performance boost over spinning disks. + +* Try to avoid long running read transactions. Bolt uses copy-on-write so + old pages cannot be reclaimed while an old transaction is using them. + +* Byte slices returned from Bolt are only valid during a transaction. Once the + transaction has been committed or rolled back then the memory they point to + can be reused by a new page or can be unmapped from virtual memory and you'll + see an `unexpected fault address` panic when accessing it. + +* Bolt uses an exclusive write lock on the database file so it cannot be + shared by multiple processes. + +* Be careful when using `Bucket.FillPercent`. Setting a high fill percent for + buckets that have random inserts will cause your database to have very poor + page utilization. + +* Use larger buckets in general. Smaller buckets causes poor page utilization + once they become larger than the page size (typically 4KB). + +* Bulk loading a lot of random writes into a new bucket can be slow as the + page will not split until the transaction is committed. Randomly inserting + more than 100,000 key/value pairs into a single new bucket in a single + transaction is not advised. + +* Bolt uses a memory-mapped file so the underlying operating system handles the + caching of the data. Typically, the OS will cache as much of the file as it + can in memory and will release memory as needed to other processes. This means + that Bolt can show very high memory usage when working with large databases. + However, this is expected and the OS will release memory as needed. Bolt can + handle databases much larger than the available physical RAM, provided its + memory-map fits in the process virtual address space. It may be problematic + on 32-bits systems. + +* The data structures in the Bolt database are memory mapped so the data file + will be endian specific. This means that you cannot copy a Bolt file from a + little endian machine to a big endian machine and have it work. For most + users this is not a concern since most modern CPUs are little endian. 
+ +* Because of the way pages are laid out on disk, Bolt cannot truncate data files + and return free pages back to the disk. Instead, Bolt maintains a free list + of unused pages within its data file. These free pages can be reused by later + transactions. This works well for many use cases as databases generally tend + to grow. However, it's important to note that deleting large chunks of data + will not allow you to reclaim that space on disk. + + For more information on page allocation, [see this comment][page-allocation]. + +[page-allocation]: https://github.com/boltdb/bolt/issues/308#issuecomment-74811638 + + +## Reading the Source + +Bolt is a relatively small code base (<5KLOC) for an embedded, serializable, +transactional key/value database so it can be a good starting point for people +interested in how databases work. + +The best places to start are the main entry points into Bolt: + +- `Open()` - Initializes the reference to the database. It's responsible for + creating the database if it doesn't exist, obtaining an exclusive lock on the + file, reading the meta pages, & memory-mapping the file. + +- `DB.Begin()` - Starts a read-only or read-write transaction depending on the + value of the `writable` argument. This requires briefly obtaining the "meta" + lock to keep track of open transactions. Only one read-write transaction can + exist at a time so the "rwlock" is acquired during the life of a read-write + transaction. + +- `Bucket.Put()` - Writes a key/value pair into a bucket. After validating the + arguments, a cursor is used to traverse the B+tree to the page and position + where they key & value will be written. Once the position is found, the bucket + materializes the underlying page and the page's parent pages into memory as + "nodes". These nodes are where mutations occur during read-write transactions. + These changes get flushed to disk during commit. + +- `Bucket.Get()` - Retrieves a key/value pair from a bucket. This uses a cursor + to move to the page & position of a key/value pair. During a read-only + transaction, the key and value data is returned as a direct reference to the + underlying mmap file so there's no allocation overhead. For read-write + transactions, this data may reference the mmap file or one of the in-memory + node values. + +- `Cursor` - This object is simply for traversing the B+tree of on-disk pages + or in-memory nodes. It can seek to a specific key, move to the first or last + value, or it can move forward or backward. The cursor handles the movement up + and down the B+tree transparently to the end user. + +- `Tx.Commit()` - Converts the in-memory dirty nodes and the list of free pages + into pages to be written to disk. Writing to disk then occurs in two phases. + First, the dirty pages are written to disk and an `fsync()` occurs. Second, a + new meta page with an incremented transaction ID is written and another + `fsync()` occurs. This two phase write ensures that partially written data + pages are ignored in the event of a crash since the meta page pointing to them + is never written. Partially written meta pages are invalidated because they + are written with a checksum. + +If you have additional notes that could be helpful for others, please submit +them via pull request. + + +## Other Projects Using Bolt + +Below is a list of public, open source projects that use Bolt: + +* [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web based GUI for BoltDB files. 
+* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard. +* [Bazil](https://bazil.org/) - A file system that lets your data reside where it is most convenient for it to reside. +* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb. +* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics. +* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects. +* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday. +* [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations. +* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite. +* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin". +* [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka. +* [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed. +* [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt. +* [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site. +* [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage. +* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters. +* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend. +* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend. +* [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server. +* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read. +* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics. +* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data. +* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system. +* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware. +* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, JSON over HTTP API, ISO 8601 duration notation, and dependent jobs. +* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems. +* [stow](https://github.com/djherbis/stow) - a persistence manager for objects + backed by boltdb. +* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining + simple tx and key scans. +* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets. 
+* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to the [RequestBin](http://requestb.in/) service
+* [Go Report Card](https://goreportcard.com/) - Go code quality report cards as a (free and open source) service.
+* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners.
+* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores.
+* [Storm](https://github.com/asdine/storm) - Simple and powerful ORM for BoltDB.
+* [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web application in Go using BoltDB.
+* [SimpleBolt](https://github.com/xyproto/simplebolt) - A simple way to use BoltDB. Deals mainly with strings.
+* [Algernon](https://github.com/xyproto/algernon) - An HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend.
+* [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files.
+* [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Golang, using BoltDB for persistent key/value storage and the high-performance HTTPRouter for routing.
+* [torrent](https://github.com/anacrolix/torrent) - Full-featured BitTorrent client package and utilities in Go. BoltDB is a storage backend in development.
+* [gopherpit](https://github.com/gopherpit/gopherpit) - A web service to manage Go remote import paths with custom domains.
+* [bolter](https://github.com/hasit/bolter) - Command-line app for viewing a BoltDB file in your terminal.
+* [boltcli](https://github.com/spacewander/boltcli) - The redis-cli for boltdb, with Lua script support.
+* [btcwallet](https://github.com/btcsuite/btcwallet) - A bitcoin wallet.
+* [dcrwallet](https://github.com/decred/dcrwallet) - A wallet for the Decred cryptocurrency.
+* [Ironsmith](https://github.com/timshannon/ironsmith) - A simple, script-driven continuous integration (build -> test -> release) tool, with no external dependencies.
+* [BoltHold](https://github.com/timshannon/bolthold) - An embeddable NoSQL store for Go types built on BoltDB.
+
+If you are using Bolt in a project please send a pull request to add it to the list.
diff --git a/vendor/github.com/boltdb/bolt/appveyor.yml b/vendor/github.com/coreos/bbolt/appveyor.yml
similarity index 100%
rename from vendor/github.com/boltdb/bolt/appveyor.yml
rename to vendor/github.com/coreos/bbolt/appveyor.yml
diff --git a/vendor/github.com/coreos/bbolt/bolt_386.go b/vendor/github.com/coreos/bbolt/bolt_386.go
new file mode 100644
index 000000000000..820d533c15fa
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/bolt_386.go
@@ -0,0 +1,10 @@
+package bolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0x7FFFFFFF // 2GB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0xFFFFFFF
+
+// Are unaligned load/stores broken on this arch?
+var brokenUnaligned = false
diff --git a/vendor/github.com/coreos/bbolt/bolt_amd64.go b/vendor/github.com/coreos/bbolt/bolt_amd64.go
new file mode 100644
index 000000000000..98fafdb47d86
--- /dev/null
+++ b/vendor/github.com/coreos/bbolt/bolt_amd64.go
@@ -0,0 +1,10 @@
+package bolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
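+// The data file is memory-mapped and remapped in larger steps as it grows,
+// so this constant effectively caps the maximum database size per
+// architecture: 64-bit platforms allow up to 256TB, while 32-bit platforms
+// such as 386 and arm are capped at 2GB.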
+const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/vendor/github.com/coreos/bbolt/bolt_arm.go b/vendor/github.com/coreos/bbolt/bolt_arm.go new file mode 100644 index 000000000000..7e5cb4b94128 --- /dev/null +++ b/vendor/github.com/coreos/bbolt/bolt_arm.go @@ -0,0 +1,28 @@ +package bolt + +import "unsafe" + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x7FFFFFFF // 2GB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0xFFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned bool + +func init() { + // Simple check to see whether this arch handles unaligned load/stores + // correctly. + + // ARM9 and older devices require load/stores to be from/to aligned + // addresses. If not, the lower 2 bits are cleared and that address is + // read in a jumbled up order. + + // See http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka15414.html + + raw := [6]byte{0xfe, 0xef, 0x11, 0x22, 0x22, 0x11} + val := *(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(&raw)) + 2)) + + brokenUnaligned = val != 0x11222211 +} diff --git a/vendor/github.com/coreos/bbolt/bolt_arm64.go b/vendor/github.com/coreos/bbolt/bolt_arm64.go new file mode 100644 index 000000000000..b26d84f91ba7 --- /dev/null +++ b/vendor/github.com/coreos/bbolt/bolt_arm64.go @@ -0,0 +1,12 @@ +// +build arm64 + +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_linux.go b/vendor/github.com/coreos/bbolt/bolt_linux.go similarity index 100% rename from vendor/github.com/boltdb/bolt/bolt_linux.go rename to vendor/github.com/coreos/bbolt/bolt_linux.go diff --git a/vendor/github.com/coreos/bbolt/bolt_mips64x.go b/vendor/github.com/coreos/bbolt/bolt_mips64x.go new file mode 100644 index 000000000000..134b578bd447 --- /dev/null +++ b/vendor/github.com/coreos/bbolt/bolt_mips64x.go @@ -0,0 +1,12 @@ +// +build mips64 mips64le + +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x8000000000 // 512GB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/vendor/github.com/coreos/bbolt/bolt_mipsx.go b/vendor/github.com/coreos/bbolt/bolt_mipsx.go new file mode 100644 index 000000000000..d5ecb0597e45 --- /dev/null +++ b/vendor/github.com/coreos/bbolt/bolt_mipsx.go @@ -0,0 +1,12 @@ +// +build mips mipsle + +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x40000000 // 1GB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0xFFFFFFF + +// Are unaligned load/stores broken on this arch? 
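+// Only 32-bit ARM probes this at runtime (see the init check in bolt_arm.go
+// above); the other ports, including MIPS here, hard-code the answer.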
+var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_openbsd.go b/vendor/github.com/coreos/bbolt/bolt_openbsd.go similarity index 100% rename from vendor/github.com/boltdb/bolt/bolt_openbsd.go rename to vendor/github.com/coreos/bbolt/bolt_openbsd.go diff --git a/vendor/github.com/boltdb/bolt/bolt_ppc.go b/vendor/github.com/coreos/bbolt/bolt_ppc.go similarity index 100% rename from vendor/github.com/boltdb/bolt/bolt_ppc.go rename to vendor/github.com/coreos/bbolt/bolt_ppc.go diff --git a/vendor/github.com/coreos/bbolt/bolt_ppc64.go b/vendor/github.com/coreos/bbolt/bolt_ppc64.go new file mode 100644 index 000000000000..9331d9771eb3 --- /dev/null +++ b/vendor/github.com/coreos/bbolt/bolt_ppc64.go @@ -0,0 +1,12 @@ +// +build ppc64 + +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_ppc64le.go b/vendor/github.com/coreos/bbolt/bolt_ppc64le.go similarity index 75% rename from vendor/github.com/boltdb/bolt/bolt_ppc64le.go rename to vendor/github.com/coreos/bbolt/bolt_ppc64le.go index 8351e129f6a3..8c143bc5d194 100644 --- a/vendor/github.com/boltdb/bolt/bolt_ppc64le.go +++ b/vendor/github.com/coreos/bbolt/bolt_ppc64le.go @@ -7,3 +7,6 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB // maxAllocSize is the size used when creating array pointers. const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/vendor/github.com/coreos/bbolt/bolt_s390x.go b/vendor/github.com/coreos/bbolt/bolt_s390x.go new file mode 100644 index 000000000000..d7c39af92539 --- /dev/null +++ b/vendor/github.com/coreos/bbolt/bolt_s390x.go @@ -0,0 +1,12 @@ +// +build s390x + +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_unix.go b/vendor/github.com/coreos/bbolt/bolt_unix.go similarity index 80% rename from vendor/github.com/boltdb/bolt/bolt_unix.go rename to vendor/github.com/coreos/bbolt/bolt_unix.go index cad62dda1e38..06592a08089f 100644 --- a/vendor/github.com/boltdb/bolt/bolt_unix.go +++ b/vendor/github.com/coreos/bbolt/bolt_unix.go @@ -13,29 +13,32 @@ import ( // flock acquires an advisory lock on a file descriptor. func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { var t time.Time + if timeout != 0 { + t = time.Now() + } + fd := db.file.Fd() + flag := syscall.LOCK_NB + if exclusive { + flag |= syscall.LOCK_EX + } else { + flag |= syscall.LOCK_SH + } for { - // If we're beyond our timeout then return an error. - // This can only occur after we've attempted a flock once. - if t.IsZero() { - t = time.Now() - } else if timeout > 0 && time.Since(t) > timeout { - return ErrTimeout - } - flag := syscall.LOCK_SH - if exclusive { - flag = syscall.LOCK_EX - } - - // Otherwise attempt to obtain an exclusive lock. - err := syscall.Flock(int(db.file.Fd()), flag|syscall.LOCK_NB) + // Attempt to obtain an exclusive lock. 
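+		// The lock mode (shared or exclusive) was fixed above; LOCK_NB keeps
+		// the call non-blocking so this loop, rather than the kernel, can
+		// enforce the caller's timeout.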
+		err := syscall.Flock(int(fd), flag)
 		if err == nil {
 			return nil
 		} else if err != syscall.EWOULDBLOCK {
 			return err
 		}
+
+		// If we timed out then return an error.
+		if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
+			return ErrTimeout
+		}
+
 		// Wait for a bit and try again.
-		time.Sleep(50 * time.Millisecond)
+		time.Sleep(flockRetryTimeout)
 	}
 }
diff --git a/vendor/github.com/boltdb/bolt/bolt_unix_solaris.go b/vendor/github.com/coreos/bbolt/bolt_unix_solaris.go
similarity index 75%
rename from vendor/github.com/boltdb/bolt/bolt_unix_solaris.go
rename to vendor/github.com/coreos/bbolt/bolt_unix_solaris.go
index 307bf2b3ee97..fd8335ecc963 100644
--- a/vendor/github.com/boltdb/bolt/bolt_unix_solaris.go
+++ b/vendor/github.com/coreos/bbolt/bolt_unix_solaris.go
@@ -13,34 +13,33 @@ import (
 // flock acquires an advisory lock on a file descriptor.
 func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
 	var t time.Time
+	if timeout != 0 {
+		t = time.Now()
+	}
+	fd := db.file.Fd()
+	var lockType int16
+	if exclusive {
+		lockType = syscall.F_WRLCK
+	} else {
+		lockType = syscall.F_RDLCK
+	}
 	for {
-		// If we're beyond our timeout then return an error.
-		// This can only occur after we've attempted a flock once.
-		if t.IsZero() {
-			t = time.Now()
-		} else if timeout > 0 && time.Since(t) > timeout {
-			return ErrTimeout
-		}
-		var lock syscall.Flock_t
-		lock.Start = 0
-		lock.Len = 0
-		lock.Pid = 0
-		lock.Whence = 0
-		lock.Pid = 0
-		if exclusive {
-			lock.Type = syscall.F_WRLCK
-		} else {
-			lock.Type = syscall.F_RDLCK
-		}
-		err := syscall.FcntlFlock(db.file.Fd(), syscall.F_SETLK, &lock)
+		// Attempt to obtain an exclusive lock.
+		lock := syscall.Flock_t{Type: lockType}
+		err := syscall.FcntlFlock(fd, syscall.F_SETLK, &lock)
 		if err == nil {
 			return nil
 		} else if err != syscall.EAGAIN {
 			return err
 		}
+
+		// If we timed out then return an error.
+		if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
+			return ErrTimeout
+		}
+
 		// Wait for a bit and try again.
-		time.Sleep(50 * time.Millisecond)
+		time.Sleep(flockRetryTimeout)
 	}
 }
diff --git a/vendor/github.com/boltdb/bolt/bolt_windows.go b/vendor/github.com/coreos/bbolt/bolt_windows.go
similarity index 88%
rename from vendor/github.com/boltdb/bolt/bolt_windows.go
rename to vendor/github.com/coreos/bbolt/bolt_windows.go
index d538e6afd77a..ca6f9a11c241 100644
--- a/vendor/github.com/boltdb/bolt/bolt_windows.go
+++ b/vendor/github.com/coreos/bbolt/bolt_windows.go
@@ -59,29 +59,30 @@ func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) erro
 	db.lockfile = f
 
 	var t time.Time
+	if timeout != 0 {
+		t = time.Now()
+	}
+	fd := f.Fd()
+	var flag uint32 = flagLockFailImmediately
+	if exclusive {
+		flag |= flagLockExclusive
+	}
 	for {
-		// If we're beyond our timeout then return an error.
-		// This can only occur after we've attempted a flock once.
-		if t.IsZero() {
-			t = time.Now()
-		} else if timeout > 0 && time.Since(t) > timeout {
-			return ErrTimeout
-		}
-
-		var flag uint32 = flagLockFailImmediately
-		if exclusive {
-			flag |= flagLockExclusive
-		}
-
-		err := lockFileEx(syscall.Handle(db.lockfile.Fd()), flag, 0, 1, 0, &syscall.Overlapped{})
+		// Attempt to obtain an exclusive lock.
+		err := lockFileEx(syscall.Handle(fd), flag, 0, 1, 0, &syscall.Overlapped{})
 		if err == nil {
 			return nil
 		} else if err != errLockViolation {
 			return err
 		}
+
+		// If we timed out then return an error.
+		if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
+			return ErrTimeout
+		}
+
+		// Wait for a bit and try again.
- time.Sleep(50 * time.Millisecond) + time.Sleep(flockRetryTimeout) } } @@ -89,7 +90,7 @@ func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) erro func funlock(db *DB) error { err := unlockFileEx(syscall.Handle(db.lockfile.Fd()), 0, 1, 0, &syscall.Overlapped{}) db.lockfile.Close() - os.Remove(db.path+lockExt) + os.Remove(db.path + lockExt) return err } diff --git a/vendor/github.com/boltdb/bolt/boltsync_unix.go b/vendor/github.com/coreos/bbolt/boltsync_unix.go similarity index 100% rename from vendor/github.com/boltdb/bolt/boltsync_unix.go rename to vendor/github.com/coreos/bbolt/boltsync_unix.go diff --git a/vendor/github.com/boltdb/bolt/bucket.go b/vendor/github.com/coreos/bbolt/bucket.go similarity index 95% rename from vendor/github.com/boltdb/bolt/bucket.go rename to vendor/github.com/coreos/bbolt/bucket.go index d2f8c524e42f..44db88b8abde 100644 --- a/vendor/github.com/boltdb/bolt/bucket.go +++ b/vendor/github.com/coreos/bbolt/bucket.go @@ -14,13 +14,6 @@ const ( MaxValueSize = (1 << 31) - 2 ) -const ( - maxUint = ^uint(0) - minUint = 0 - maxInt = int(^uint(0) >> 1) - minInt = -maxInt - 1 -) - const bucketHeaderSize = int(unsafe.Sizeof(bucket{})) const ( @@ -130,9 +123,17 @@ func (b *Bucket) Bucket(name []byte) *Bucket { func (b *Bucket) openBucket(value []byte) *Bucket { var child = newBucket(b.tx) + // If unaligned load/stores are broken on this arch and value is + // unaligned simply clone to an aligned byte array. + unaligned := brokenUnaligned && uintptr(unsafe.Pointer(&value[0]))&3 != 0 + + if unaligned { + value = cloneBytes(value) + } + // If this is a writable transaction then we need to copy the bucket entry. // Read-only transactions can point directly at the mmap entry. - if b.tx.writable { + if b.tx.writable && !unaligned { child.bucket = &bucket{} *child.bucket = *(*bucket)(unsafe.Pointer(&value[0])) } else { @@ -167,9 +168,8 @@ func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { if bytes.Equal(key, k) { if (flags & bucketLeafFlag) != 0 { return nil, ErrBucketExists - } else { - return nil, ErrIncompatibleValue } + return nil, ErrIncompatibleValue } // Create empty, inline bucket. @@ -316,7 +316,12 @@ func (b *Bucket) Delete(key []byte) error { // Move cursor to correct position. c := b.Cursor() - _, _, flags := c.seek(key) + k, _, flags := c.seek(key) + + // Return nil if the key doesn't exist. + if !bytes.Equal(key, k) { + return nil + } // Return an error if there is already existing bucket value. if (flags & bucketLeafFlag) != 0 { @@ -329,6 +334,28 @@ func (b *Bucket) Delete(key []byte) error { return nil } +// Sequence returns the current integer for the bucket without incrementing it. +func (b *Bucket) Sequence() uint64 { return b.bucket.sequence } + +// SetSequence updates the sequence number for the bucket. +func (b *Bucket) SetSequence(v uint64) error { + if b.tx.db == nil { + return ErrTxClosed + } else if !b.Writable() { + return ErrTxNotWritable + } + + // Materialize the root node if it hasn't been already so that the + // bucket will be saved during commit. + if b.rootNode == nil { + _ = b.node(b.root, nil) + } + + // Increment and return the sequence. + b.bucket.sequence = v + return nil +} + // NextSequence returns an autoincrementing integer for the bucket. 
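+// Together with Sequence and SetSequence above, this can be used to hand out
+// unique, monotonically increasing IDs from a writable transaction; for
+// example (an illustrative sketch only; `bucket` is a hypothetical *Bucket
+// obtained inside an Update call):
+//
+//	id, err := bucket.NextSequence() // yields 1, 2, 3, ... across commits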
func (b *Bucket) NextSequence() (uint64, error) { if b.tx.db == nil { diff --git a/vendor/github.com/boltdb/bolt/cursor.go b/vendor/github.com/coreos/bbolt/cursor.go similarity index 100% rename from vendor/github.com/boltdb/bolt/cursor.go rename to vendor/github.com/coreos/bbolt/cursor.go diff --git a/vendor/github.com/boltdb/bolt/db.go b/vendor/github.com/coreos/bbolt/db.go similarity index 85% rename from vendor/github.com/boltdb/bolt/db.go rename to vendor/github.com/coreos/bbolt/db.go index 1223493ca7be..4c8c156b23e9 100644 --- a/vendor/github.com/boltdb/bolt/db.go +++ b/vendor/github.com/coreos/bbolt/db.go @@ -7,8 +7,7 @@ import ( "log" "os" "runtime" - "runtime/debug" - "strings" + "sort" "sync" "time" "unsafe" @@ -23,6 +22,8 @@ const version = 2 // Represents a marker value to indicate that a file is a Bolt DB. const magic uint32 = 0xED0CDAED +const pgidNoFreelist pgid = 0xffffffffffffffff + // IgnoreNoSync specifies whether the NoSync field of a DB is ignored when // syncing changes to a file. This is required as some operating systems, // such as OpenBSD, do not have a unified buffer cache (UBC) and writes @@ -39,6 +40,9 @@ const ( // default page size for db is set to the OS page size. var defaultPageSize = os.Getpagesize() +// The time elapsed between consecutive file locking attempts. +const flockRetryTimeout = 50 * time.Millisecond + // DB represents a collection of buckets persisted to a file on disk. // All data access is performed through transactions which can be obtained through the DB. // All the functions on DB will return a ErrDatabaseNotOpen if accessed before Open() is called. @@ -61,6 +65,11 @@ type DB struct { // THIS IS UNSAFE. PLEASE USE WITH CAUTION. NoSync bool + // When true, skips syncing freelist to disk. This improves the database + // write performance under normal operation, but requires a full database + // re-sync during recovery. + NoFreelistSync bool + // When true, skips the truncate call when growing the database. // Setting this to true is only safe on non-ext3/ext4 systems. // Skipping truncation avoids preallocation of hard drive space and @@ -107,9 +116,11 @@ type DB struct { opened bool rwtx *Tx txs []*Tx - freelist *freelist stats Stats + freelist *freelist + freelistLoad sync.Once + pagePool sync.Pool batchMu sync.Mutex @@ -148,14 +159,17 @@ func (db *DB) String() string { // If the file does not exist then it will be created automatically. // Passing in nil options will cause Bolt to open the database with the default options. func Open(path string, mode os.FileMode, options *Options) (*DB, error) { - var db = &DB{opened: true} - + db := &DB{ + opened: true, + } // Set default options if no options are provided. if options == nil { options = DefaultOptions } + db.NoSync = options.NoSync db.NoGrowSync = options.NoGrowSync db.MmapFlags = options.MmapFlags + db.NoFreelistSync = options.NoFreelistSync // Set default values for later DB operations. db.MaxBatchSize = DefaultMaxBatchSize @@ -184,6 +198,7 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { // The database file is locked using the shared lock (more than one process may // hold a lock at the same time) otherwise (options.ReadOnly is set). if err := flock(db, mode, !db.readOnly, options.Timeout); err != nil { + db.lockfile = nil // make 'unused' happy. 
TODO: rework locks _ = db.close() return nil, err } @@ -191,6 +206,11 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { // Default values for test hooks db.ops.writeAt = db.file.WriteAt + if db.pageSize = options.PageSize; db.pageSize == 0 { + // Set the default page size to the OS page size. + db.pageSize = defaultPageSize + } + // Initialize the database if it doesn't exist. if info, err := db.file.Stat(); err != nil { return nil, err @@ -202,20 +222,21 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { } else { // Read the first meta page to determine the page size. var buf [0x1000]byte - if _, err := db.file.ReadAt(buf[:], 0); err == nil { - m := db.pageInBuffer(buf[:], 0).meta() - if err := m.validate(); err != nil { - // If we can't read the page size, we can assume it's the same - // as the OS -- since that's how the page size was chosen in the - // first place. - // - // If the first page is invalid and this OS uses a different - // page size than what the database was created with then we - // are out of luck and cannot access the database. - db.pageSize = os.Getpagesize() - } else { + // If we can't read the page size, but can read a page, assume + // it's the same as the OS or one given -- since that's how the + // page size was chosen in the first place. + // + // If the first page is invalid and this OS uses a different + // page size than what the database was created with then we + // are out of luck and cannot access the database. + // + // TODO: scan for next page + if bw, err := db.file.ReadAt(buf[:], 0); err == nil && bw == len(buf) { + if m := db.pageInBuffer(buf[:], 0).meta(); m.validate() == nil { db.pageSize = int(m.pageSize) } + } else { + return nil, ErrInvalid } } @@ -232,14 +253,50 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { return nil, err } - // Read in the freelist. - db.freelist = newFreelist() - db.freelist.read(db.page(db.meta().freelist)) + if db.readOnly { + return db, nil + } + + db.loadFreelist() + + // Flush freelist when transitioning from no sync to sync so + // NoFreelistSync unaware boltdb can open the db later. + if !db.NoFreelistSync && !db.hasSyncedFreelist() { + tx, err := db.Begin(true) + if tx != nil { + err = tx.Commit() + } + if err != nil { + _ = db.close() + return nil, err + } + } // Mark the database as opened and return. return db, nil } +// loadFreelist reads the freelist if it is synced, or reconstructs it +// by scanning the DB if it is not synced. It assumes there are no +// concurrent accesses being made to the freelist. +func (db *DB) loadFreelist() { + db.freelistLoad.Do(func() { + db.freelist = newFreelist() + if !db.hasSyncedFreelist() { + // Reconstruct free list by scanning the DB. + db.freelist.readIDs(db.freepages()) + } else { + // Read free list from freelist page. + db.freelist.read(db.page(db.meta().freelist)) + } + db.stats.FreePageN = len(db.freelist.ids) + }) +} + +func (db *DB) hasSyncedFreelist() bool { + return db.meta().freelist != pgidNoFreelist +} + // mmap opens the underlying memory-mapped file and initializes the meta references. // minsz is the minimum size that the new mmap can be. func (db *DB) mmap(minsz int) error { @@ -341,9 +398,6 @@ func (db *DB) mmapSize(size int) (int, error) { // init creates a new database file and initializes its meta pages. func (db *DB) init() error { - // Set the page size to the OS page size. - db.pageSize = os.Getpagesize() - // Create two meta pages on a buffer. 
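+	// A new database needs four pages up front: alternating meta pages at
+	// pgid 0 and 1, a freelist page at pgid 2, and an empty leaf page at
+	// pgid 3 for the root bucket, hence the pageSize*4 buffer below.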
buf := make([]byte, db.pageSize*4) for i := 0; i < 2; i++ { @@ -526,21 +580,36 @@ func (db *DB) beginRWTx() (*Tx, error) { t := &Tx{writable: true} t.init(db) db.rwtx = t + db.freePages() + return t, nil +} - // Free any pages associated with closed read-only transactions. - var minid txid = 0xFFFFFFFFFFFFFFFF - for _, t := range db.txs { - if t.meta.txid < minid { - minid = t.meta.txid - } +// freePages releases any pages associated with closed read-only transactions. +func (db *DB) freePages() { + // Free all pending pages prior to earliest open transaction. + sort.Sort(txsById(db.txs)) + minid := txid(0xFFFFFFFFFFFFFFFF) + if len(db.txs) > 0 { + minid = db.txs[0].meta.txid } if minid > 0 { db.freelist.release(minid - 1) } - - return t, nil + // Release unused txid extents. + for _, t := range db.txs { + db.freelist.releaseRange(minid, t.meta.txid-1) + minid = t.meta.txid + 1 + } + db.freelist.releaseRange(minid, txid(0xFFFFFFFFFFFFFFFF)) + // Any page both allocated and freed in an extent is safe to release. } +type txsById []*Tx + +func (t txsById) Len() int { return len(t) } +func (t txsById) Swap(i, j int) { t[i], t[j] = t[j], t[i] } +func (t txsById) Less(i, j int) bool { return t[i].meta.txid < t[j].meta.txid } + // removeTx removes a transaction from the database. func (db *DB) removeTx(tx *Tx) { // Release the read lock on the mmap. @@ -552,7 +621,10 @@ func (db *DB) removeTx(tx *Tx) { // Remove the transaction. for i, t := range db.txs { if t == tx { - db.txs = append(db.txs[:i], db.txs[i+1:]...) + last := len(db.txs) - 1 + db.txs[i] = db.txs[last] + db.txs[last] = nil + db.txs = db.txs[:last] break } } @@ -630,11 +702,7 @@ func (db *DB) View(fn func(*Tx) error) error { return err } - if err := t.Rollback(); err != nil { - return err - } - - return nil + return t.Rollback() } // Batch calls fn as part of a batch. It behaves similar to Update, @@ -734,9 +802,7 @@ retry: // pass success, or bolt internal errors, to all callers for _, c := range b.calls { - if c.err != nil { - c.err <- err - } + c.err <- err } break retry } @@ -823,7 +889,7 @@ func (db *DB) meta() *meta { } // allocate returns a contiguous block of memory starting at a given page. -func (db *DB) allocate(count int) (*page, error) { +func (db *DB) allocate(txid txid, count int) (*page, error) { // Allocate a temporary buffer for the page. var buf []byte if count == 1 { @@ -835,7 +901,7 @@ func (db *DB) allocate(count int) (*page, error) { p.overflow = uint32(count - 1) // Use pages from the freelist if they are available. - if p.id = db.freelist.allocate(count); p.id != 0 { + if p.id = db.freelist.allocate(txid, count); p.id != 0 { return p, nil } @@ -890,6 +956,38 @@ func (db *DB) IsReadOnly() bool { return db.readOnly } +func (db *DB) freepages() []pgid { + tx, err := db.beginTx() + defer func() { + err = tx.Rollback() + if err != nil { + panic("freepages: failed to rollback tx") + } + }() + if err != nil { + panic("freepages: failed to open read only tx") + } + + reachable := make(map[pgid]*page) + nofreed := make(map[pgid]bool) + ech := make(chan error) + go func() { + for e := range ech { + panic(fmt.Sprintf("freepages: failed to get all reachable pages (%v)", e)) + } + }() + tx.checkBucket(&tx.root, reachable, nofreed, ech) + close(ech) + + var fids []pgid + for i := pgid(2); i < db.meta().pgid; i++ { + if _, ok := reachable[i]; !ok { + fids = append(fids, i) + } + } + return fids +} + // Options represents the options that can be set when opening a database. 
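+// For example, a caller might combine a lock timeout with the new
+// NoFreelistSync flag (an illustrative sketch; the file name and values are
+// hypothetical, not defaults):
+//
+//	db, err := bolt.Open("my.db", 0600, &bolt.Options{
+//		Timeout:        time.Second,
+//		NoFreelistSync: true,
+//	})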
type Options struct { // Timeout is the amount of time to wait to obtain a file lock. @@ -900,6 +998,10 @@ type Options struct { // Sets the DB.NoGrowSync flag before memory mapping the file. NoGrowSync bool + // Do not sync freelist to disk. This improves the database write performance + // under normal operation, but requires a full database re-sync during recovery. + NoFreelistSync bool + // Open database in read-only mode. Uses flock(..., LOCK_SH |LOCK_NB) to // grab a shared lock (UNIX). ReadOnly bool @@ -916,6 +1018,14 @@ type Options struct { // If initialMmapSize is smaller than the previous database size, // it takes no effect. InitialMmapSize int + + // PageSize overrides the default OS page size. + PageSize int + + // NoSync sets the initial value of DB.NoSync. Normally this can just be + // set directly on the DB itself when returned from Open(), but this option + // is useful in APIs which expose Options but not the underlying DB. + NoSync bool } // DefaultOptions represent the options used if nil options are passed into Open(). @@ -952,15 +1062,11 @@ func (s *Stats) Sub(other *Stats) Stats { diff.PendingPageN = s.PendingPageN diff.FreeAlloc = s.FreeAlloc diff.FreelistInuse = s.FreelistInuse - diff.TxN = other.TxN - s.TxN + diff.TxN = s.TxN - other.TxN diff.TxStats = s.TxStats.Sub(&other.TxStats) return diff } -func (s *Stats) add(other *Stats) { - s.TxStats.add(&other.TxStats) -} - type Info struct { Data uintptr PageSize int @@ -999,7 +1105,8 @@ func (m *meta) copy(dest *meta) { func (m *meta) write(p *page) { if m.root.root >= m.pgid { panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid)) - } else if m.freelist >= m.pgid { + } else if m.freelist >= m.pgid && m.freelist != pgidNoFreelist { + // TODO: reject pgidNoFreeList if !NoFreelistSync panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid)) } @@ -1026,11 +1133,3 @@ func _assert(condition bool, msg string, v ...interface{}) { panic(fmt.Sprintf("assertion failed: "+msg, v...)) } } - -func warn(v ...interface{}) { fmt.Fprintln(os.Stderr, v...) } -func warnf(msg string, v ...interface{}) { fmt.Fprintf(os.Stderr, msg+"\n", v...) } - -func printstack() { - stack := strings.Join(strings.Split(string(debug.Stack()), "\n")[2:], "\n") - fmt.Fprintln(os.Stderr, stack) -} diff --git a/vendor/github.com/boltdb/bolt/doc.go b/vendor/github.com/coreos/bbolt/doc.go similarity index 100% rename from vendor/github.com/boltdb/bolt/doc.go rename to vendor/github.com/coreos/bbolt/doc.go diff --git a/vendor/github.com/boltdb/bolt/errors.go b/vendor/github.com/coreos/bbolt/errors.go similarity index 100% rename from vendor/github.com/boltdb/bolt/errors.go rename to vendor/github.com/coreos/bbolt/errors.go diff --git a/vendor/github.com/coreos/bbolt/freelist.go b/vendor/github.com/coreos/bbolt/freelist.go new file mode 100644 index 000000000000..266f15429453 --- /dev/null +++ b/vendor/github.com/coreos/bbolt/freelist.go @@ -0,0 +1,333 @@ +package bolt + +import ( + "fmt" + "sort" + "unsafe" +) + +// txPending holds a list of pgids and corresponding allocation txns +// that are pending to be freed. +type txPending struct { + ids []pgid + alloctx []txid // txids allocating the ids + lastReleaseBegin txid // beginning txid of last matching releaseRange +} + +// freelist represents a list of all pages that are available for allocation. +// It also tracks pages that have been freed but are still in use by open transactions. 
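+// A page moves through three states: free (immediately reusable), pending
+// (freed by a transaction but possibly still visible to older readers), and
+// back to free once release/releaseRange below determine that no open read
+// transaction can still reference it.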
+type freelist struct { + ids []pgid // all free and available free page ids. + allocs map[pgid]txid // mapping of txid that allocated a pgid. + pending map[txid]*txPending // mapping of soon-to-be free page ids by tx. + cache map[pgid]bool // fast lookup of all free and pending page ids. +} + +// newFreelist returns an empty, initialized freelist. +func newFreelist() *freelist { + return &freelist{ + allocs: make(map[pgid]txid), + pending: make(map[txid]*txPending), + cache: make(map[pgid]bool), + } +} + +// size returns the size of the page after serialization. +func (f *freelist) size() int { + n := f.count() + if n >= 0xFFFF { + // The first element will be used to store the count. See freelist.write. + n++ + } + return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * n) +} + +// count returns count of pages on the freelist +func (f *freelist) count() int { + return f.free_count() + f.pending_count() +} + +// free_count returns count of free pages +func (f *freelist) free_count() int { + return len(f.ids) +} + +// pending_count returns count of pending pages +func (f *freelist) pending_count() int { + var count int + for _, txp := range f.pending { + count += len(txp.ids) + } + return count +} + +// copyall copies into dst a list of all free ids and all pending ids in one sorted list. +// f.count returns the minimum length required for dst. +func (f *freelist) copyall(dst []pgid) { + m := make(pgids, 0, f.pending_count()) + for _, txp := range f.pending { + m = append(m, txp.ids...) + } + sort.Sort(m) + mergepgids(dst, f.ids, m) +} + +// allocate returns the starting page id of a contiguous list of pages of a given size. +// If a contiguous block cannot be found then 0 is returned. +func (f *freelist) allocate(txid txid, n int) pgid { + if len(f.ids) == 0 { + return 0 + } + + var initial, previd pgid + for i, id := range f.ids { + if id <= 1 { + panic(fmt.Sprintf("invalid page allocation: %d", id)) + } + + // Reset initial page if this is not contiguous. + if previd == 0 || id-previd != 1 { + initial = id + } + + // If we found a contiguous block then remove it and return it. + if (id-initial)+1 == pgid(n) { + // If we're allocating off the beginning then take the fast path + // and just adjust the existing slice. This will use extra memory + // temporarily but the append() in free() will realloc the slice + // as is necessary. + if (i + 1) == n { + f.ids = f.ids[i+1:] + } else { + copy(f.ids[i-n+1:], f.ids[i+1:]) + f.ids = f.ids[:len(f.ids)-n] + } + + // Remove from the free cache. + for i := pgid(0); i < pgid(n); i++ { + delete(f.cache, initial+i) + } + f.allocs[initial] = txid + return initial + } + + previd = id + } + return 0 +} + +// free releases a page and its overflow for a given transaction id. +// If the page is already free then a panic will occur. +func (f *freelist) free(txid txid, p *page) { + if p.id <= 1 { + panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id)) + } + + // Free page and all its overflow pages. + txp := f.pending[txid] + if txp == nil { + txp = &txPending{} + f.pending[txid] = txp + } + allocTxid, ok := f.allocs[p.id] + if ok { + delete(f.allocs, p.id) + } else if (p.flags & freelistPageFlag) != 0 { + // Freelist is always allocated by prior tx. + allocTxid = txid - 1 + } + + for id := p.id; id <= p.id+pgid(p.overflow); id++ { + // Verify that page is not already free. + if f.cache[id] { + panic(fmt.Sprintf("page %d already freed", id)) + } + // Add to the freelist and cache. 
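+		// The allocating txid is recorded alongside each page id so that
+		// releaseRange can later drop pages whose entire allocate/free
+		// lifetime falls within a released extent.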
+ txp.ids = append(txp.ids, id) + txp.alloctx = append(txp.alloctx, allocTxid) + f.cache[id] = true + } +} + +// release moves all page ids for a transaction id (or older) to the freelist. +func (f *freelist) release(txid txid) { + m := make(pgids, 0) + for tid, txp := range f.pending { + if tid <= txid { + // Move transaction's pending pages to the available freelist. + // Don't remove from the cache since the page is still free. + m = append(m, txp.ids...) + delete(f.pending, tid) + } + } + sort.Sort(m) + f.ids = pgids(f.ids).merge(m) +} + +// releaseRange moves pending pages allocated within an extent [begin,end] to the free list. +func (f *freelist) releaseRange(begin, end txid) { + if begin > end { + return + } + var m pgids + for tid, txp := range f.pending { + if tid < begin || tid > end { + continue + } + // Don't recompute freed pages if ranges haven't updated. + if txp.lastReleaseBegin == begin { + continue + } + for i := 0; i < len(txp.ids); i++ { + if atx := txp.alloctx[i]; atx < begin || atx > end { + continue + } + m = append(m, txp.ids[i]) + txp.ids[i] = txp.ids[len(txp.ids)-1] + txp.ids = txp.ids[:len(txp.ids)-1] + txp.alloctx[i] = txp.alloctx[len(txp.alloctx)-1] + txp.alloctx = txp.alloctx[:len(txp.alloctx)-1] + i-- + } + txp.lastReleaseBegin = begin + if len(txp.ids) == 0 { + delete(f.pending, tid) + } + } + sort.Sort(m) + f.ids = pgids(f.ids).merge(m) +} + +// rollback removes the pages from a given pending tx. +func (f *freelist) rollback(txid txid) { + // Remove page ids from cache. + txp := f.pending[txid] + if txp == nil { + return + } + var m pgids + for i, pgid := range txp.ids { + delete(f.cache, pgid) + tx := txp.alloctx[i] + if tx == 0 { + continue + } + if tx != txid { + // Pending free aborted; restore page back to alloc list. + f.allocs[pgid] = tx + } else { + // Freed page was allocated by this txn; OK to throw away. + m = append(m, pgid) + } + } + // Remove pages from pending list and mark as free if allocated by txid. + delete(f.pending, txid) + sort.Sort(m) + f.ids = pgids(f.ids).merge(m) +} + +// freed returns whether a given page is in the free list. +func (f *freelist) freed(pgid pgid) bool { + return f.cache[pgid] +} + +// read initializes the freelist from a freelist page. +func (f *freelist) read(p *page) { + if (p.flags & freelistPageFlag) == 0 { + panic(fmt.Sprintf("invalid freelist page: %d, page type is %s", p.id, p.typ())) + } + // If the page.count is at the max uint16 value (64k) then it's considered + // an overflow and the size of the freelist is stored as the first element. + idx, count := 0, int(p.count) + if count == 0xFFFF { + idx = 1 + count = int(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0]) + } + + // Copy the list of page ids from the freelist. + if count == 0 { + f.ids = nil + } else { + ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx : idx+count] + f.ids = make([]pgid, len(ids)) + copy(f.ids, ids) + + // Make sure they're sorted. + sort.Sort(pgids(f.ids)) + } + + // Rebuild the page cache. + f.reindex() +} + +// read initializes the freelist from a given list of ids. +func (f *freelist) readIDs(ids []pgid) { + f.ids = ids + f.reindex() +} + +// write writes the page ids onto a freelist page. All free and pending ids are +// saved to disk since in the event of a program crash, all pending ids will +// become free. +func (f *freelist) write(p *page) error { + // Combine the old free pgids and pgids waiting on an open transaction. + + // Update the header flag. 
+ p.flags |= freelistPageFlag + + // The page.count can only hold up to 64k elements so if we overflow that + // number then we handle it by putting the size in the first element. + lenids := f.count() + if lenids == 0 { + p.count = uint16(lenids) + } else if lenids < 0xFFFF { + p.count = uint16(lenids) + f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:]) + } else { + p.count = 0xFFFF + ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(lenids) + f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:]) + } + + return nil +} + +// reload reads the freelist from a page and filters out pending items. +func (f *freelist) reload(p *page) { + f.read(p) + + // Build a cache of only pending pages. + pcache := make(map[pgid]bool) + for _, txp := range f.pending { + for _, pendingID := range txp.ids { + pcache[pendingID] = true + } + } + + // Check each page in the freelist and build a new available freelist + // with any pages not in the pending lists. + var a []pgid + for _, id := range f.ids { + if !pcache[id] { + a = append(a, id) + } + } + f.ids = a + + // Once the available list is rebuilt then rebuild the free cache so that + // it includes the available and pending free pages. + f.reindex() +} + +// reindex rebuilds the free cache based on available and pending free lists. +func (f *freelist) reindex() { + f.cache = make(map[pgid]bool, len(f.ids)) + for _, id := range f.ids { + f.cache[id] = true + } + for _, txp := range f.pending { + for _, pendingID := range txp.ids { + f.cache[pendingID] = true + } + } +} diff --git a/vendor/github.com/coreos/bbolt/node.go b/vendor/github.com/coreos/bbolt/node.go new file mode 100644 index 000000000000..f4ce240edddb --- /dev/null +++ b/vendor/github.com/coreos/bbolt/node.go @@ -0,0 +1,604 @@ +package bolt + +import ( + "bytes" + "fmt" + "sort" + "unsafe" +) + +// node represents an in-memory, deserialized page. +type node struct { + bucket *Bucket + isLeaf bool + unbalanced bool + spilled bool + key []byte + pgid pgid + parent *node + children nodes + inodes inodes +} + +// root returns the top-level node this node is attached to. +func (n *node) root() *node { + if n.parent == nil { + return n + } + return n.parent.root() +} + +// minKeys returns the minimum number of inodes this node should have. +func (n *node) minKeys() int { + if n.isLeaf { + return 1 + } + return 2 +} + +// size returns the size of the node after serialization. +func (n *node) size() int { + sz, elsz := pageHeaderSize, n.pageElementSize() + for i := 0; i < len(n.inodes); i++ { + item := &n.inodes[i] + sz += elsz + len(item.key) + len(item.value) + } + return sz +} + +// sizeLessThan returns true if the node is less than a given size. +// This is an optimization to avoid calculating a large node when we only need +// to know if it fits inside a certain page size. +func (n *node) sizeLessThan(v int) bool { + sz, elsz := pageHeaderSize, n.pageElementSize() + for i := 0; i < len(n.inodes); i++ { + item := &n.inodes[i] + sz += elsz + len(item.key) + len(item.value) + if sz >= v { + return false + } + } + return true +} + +// pageElementSize returns the size of each page element based on the type of node. +func (n *node) pageElementSize() int { + if n.isLeaf { + return leafPageElementSize + } + return branchPageElementSize +} + +// childAt returns the child node at a given index. 
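+// It is only meaningful for branch nodes; calling it on a leaf panics, as
+// the check below enforces.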
+func (n *node) childAt(index int) *node { + if n.isLeaf { + panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index)) + } + return n.bucket.node(n.inodes[index].pgid, n) +} + +// childIndex returns the index of a given child node. +func (n *node) childIndex(child *node) int { + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, child.key) != -1 }) + return index +} + +// numChildren returns the number of children. +func (n *node) numChildren() int { + return len(n.inodes) +} + +// nextSibling returns the next node with the same parent. +func (n *node) nextSibling() *node { + if n.parent == nil { + return nil + } + index := n.parent.childIndex(n) + if index >= n.parent.numChildren()-1 { + return nil + } + return n.parent.childAt(index + 1) +} + +// prevSibling returns the previous node with the same parent. +func (n *node) prevSibling() *node { + if n.parent == nil { + return nil + } + index := n.parent.childIndex(n) + if index == 0 { + return nil + } + return n.parent.childAt(index - 1) +} + +// put inserts a key/value. +func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) { + if pgid >= n.bucket.tx.meta.pgid { + panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", pgid, n.bucket.tx.meta.pgid)) + } else if len(oldKey) <= 0 { + panic("put: zero-length old key") + } else if len(newKey) <= 0 { + panic("put: zero-length new key") + } + + // Find insertion index. + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 }) + + // Add capacity and shift nodes if we don't have an exact match and need to insert. + exact := (len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].key, oldKey)) + if !exact { + n.inodes = append(n.inodes, inode{}) + copy(n.inodes[index+1:], n.inodes[index:]) + } + + inode := &n.inodes[index] + inode.flags = flags + inode.key = newKey + inode.value = value + inode.pgid = pgid + _assert(len(inode.key) > 0, "put: zero-length inode key") +} + +// del removes a key from the node. +func (n *node) del(key []byte) { + // Find index of key. + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, key) != -1 }) + + // Exit if the key isn't found. + if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].key, key) { + return + } + + // Delete inode from the node. + n.inodes = append(n.inodes[:index], n.inodes[index+1:]...) + + // Mark the node as needing rebalancing. + n.unbalanced = true +} + +// read initializes the node from a page. +func (n *node) read(p *page) { + n.pgid = p.id + n.isLeaf = ((p.flags & leafPageFlag) != 0) + n.inodes = make(inodes, int(p.count)) + + for i := 0; i < int(p.count); i++ { + inode := &n.inodes[i] + if n.isLeaf { + elem := p.leafPageElement(uint16(i)) + inode.flags = elem.flags + inode.key = elem.key() + inode.value = elem.value() + } else { + elem := p.branchPageElement(uint16(i)) + inode.pgid = elem.pgid + inode.key = elem.key() + } + _assert(len(inode.key) > 0, "read: zero-length inode key") + } + + // Save first key so we can find the node in the parent when we spill. + if len(n.inodes) > 0 { + n.key = n.inodes[0].key + _assert(len(n.key) > 0, "read: zero-length node key") + } else { + n.key = nil + } +} + +// write writes the items onto one or more pages. +func (n *node) write(p *page) { + // Initialize page. 
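+	// On-disk layout written below: the page header receives the leaf/branch
+	// flag and the element count, the fixed-size element headers come next,
+	// and the variable-length keys and values are appended after the last
+	// header.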
+ if n.isLeaf { + p.flags |= leafPageFlag + } else { + p.flags |= branchPageFlag + } + + if len(n.inodes) >= 0xFFFF { + panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.id)) + } + p.count = uint16(len(n.inodes)) + + // Stop here if there are no items to write. + if p.count == 0 { + return + } + + // Loop over each item and write it to the page. + b := (*[maxAllocSize]byte)(unsafe.Pointer(&p.ptr))[n.pageElementSize()*len(n.inodes):] + for i, item := range n.inodes { + _assert(len(item.key) > 0, "write: zero-length inode key") + + // Write the page element. + if n.isLeaf { + elem := p.leafPageElement(uint16(i)) + elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) + elem.flags = item.flags + elem.ksize = uint32(len(item.key)) + elem.vsize = uint32(len(item.value)) + } else { + elem := p.branchPageElement(uint16(i)) + elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) + elem.ksize = uint32(len(item.key)) + elem.pgid = item.pgid + _assert(elem.pgid != p.id, "write: circular dependency occurred") + } + + // If the length of key+value is larger than the max allocation size + // then we need to reallocate the byte array pointer. + // + // See: https://github.com/boltdb/bolt/pull/335 + klen, vlen := len(item.key), len(item.value) + if len(b) < klen+vlen { + b = (*[maxAllocSize]byte)(unsafe.Pointer(&b[0]))[:] + } + + // Write data for the element to the end of the page. + copy(b[0:], item.key) + b = b[klen:] + copy(b[0:], item.value) + b = b[vlen:] + } + + // DEBUG ONLY: n.dump() +} + +// split breaks up a node into multiple smaller nodes, if appropriate. +// This should only be called from the spill() function. +func (n *node) split(pageSize int) []*node { + var nodes []*node + + node := n + for { + // Split node into two. + a, b := node.splitTwo(pageSize) + nodes = append(nodes, a) + + // If we can't split then exit the loop. + if b == nil { + break + } + + // Set node to b so it gets split on the next iteration. + node = b + } + + return nodes +} + +// splitTwo breaks up a node into two smaller nodes, if appropriate. +// This should only be called from the split() function. +func (n *node) splitTwo(pageSize int) (*node, *node) { + // Ignore the split if the page doesn't have at least enough nodes for + // two pages or if the nodes can fit in a single page. + if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) { + return n, nil + } + + // Determine the threshold before starting a new node. + var fillPercent = n.bucket.FillPercent + if fillPercent < minFillPercent { + fillPercent = minFillPercent + } else if fillPercent > maxFillPercent { + fillPercent = maxFillPercent + } + threshold := int(float64(pageSize) * fillPercent) + + // Determine split position and sizes of the two pages. + splitIndex, _ := n.splitIndex(threshold) + + // Split node into two separate nodes. + // If there's no parent then we'll need to create one. + if n.parent == nil { + n.parent = &node{bucket: n.bucket, children: []*node{n}} + } + + // Create a new node and add it to the parent. + next := &node{bucket: n.bucket, isLeaf: n.isLeaf, parent: n.parent} + n.parent.children = append(n.parent.children, next) + + // Split inodes across two nodes. + next.inodes = n.inodes[splitIndex:] + n.inodes = n.inodes[:splitIndex] + + // Update the statistics. + n.bucket.tx.stats.Split++ + + return n, next +} + +// splitIndex finds the position where a page will fill a given threshold. +// It returns the index as well as the size of the first page. 
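+// The caller computes the threshold as pageSize scaled by the bucket's
+// FillPercent (clamped to the min/max fill bounds in splitTwo above), and at
+// least minKeysPerPage keys are always left for the second page.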
+// This is only be called from split(). +func (n *node) splitIndex(threshold int) (index, sz int) { + sz = pageHeaderSize + + // Loop until we only have the minimum number of keys required for the second page. + for i := 0; i < len(n.inodes)-minKeysPerPage; i++ { + index = i + inode := n.inodes[i] + elsize := n.pageElementSize() + len(inode.key) + len(inode.value) + + // If we have at least the minimum number of keys and adding another + // node would put us over the threshold then exit and return. + if i >= minKeysPerPage && sz+elsize > threshold { + break + } + + // Add the element size to the total size. + sz += elsize + } + + return +} + +// spill writes the nodes to dirty pages and splits nodes as it goes. +// Returns an error if dirty pages cannot be allocated. +func (n *node) spill() error { + var tx = n.bucket.tx + if n.spilled { + return nil + } + + // Spill child nodes first. Child nodes can materialize sibling nodes in + // the case of split-merge so we cannot use a range loop. We have to check + // the children size on every loop iteration. + sort.Sort(n.children) + for i := 0; i < len(n.children); i++ { + if err := n.children[i].spill(); err != nil { + return err + } + } + + // We no longer need the child list because it's only used for spill tracking. + n.children = nil + + // Split nodes into appropriate sizes. The first node will always be n. + var nodes = n.split(tx.db.pageSize) + for _, node := range nodes { + // Add node's page to the freelist if it's not new. + if node.pgid > 0 { + tx.db.freelist.free(tx.meta.txid, tx.page(node.pgid)) + node.pgid = 0 + } + + // Allocate contiguous space for the node. + p, err := tx.allocate((node.size() + tx.db.pageSize - 1) / tx.db.pageSize) + if err != nil { + return err + } + + // Write the node. + if p.id >= tx.meta.pgid { + panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid)) + } + node.pgid = p.id + node.write(p) + node.spilled = true + + // Insert into parent inodes. + if node.parent != nil { + var key = node.key + if key == nil { + key = node.inodes[0].key + } + + node.parent.put(key, node.inodes[0].key, nil, node.pgid, 0) + node.key = node.inodes[0].key + _assert(len(node.key) > 0, "spill: zero-length node key") + } + + // Update the statistics. + tx.stats.Spill++ + } + + // If the root node split and created a new root then we need to spill that + // as well. We'll clear out the children to make sure it doesn't try to respill. + if n.parent != nil && n.parent.pgid == 0 { + n.children = nil + return n.parent.spill() + } + + return nil +} + +// rebalance attempts to combine the node with sibling nodes if the node fill +// size is below a threshold or if there are not enough keys. +func (n *node) rebalance() { + if !n.unbalanced { + return + } + n.unbalanced = false + + // Update statistics. + n.bucket.tx.stats.Rebalance++ + + // Ignore if node is above threshold (25%) and has enough keys. + var threshold = n.bucket.tx.db.pageSize / 4 + if n.size() > threshold && len(n.inodes) > n.minKeys() { + return + } + + // Root node has special handling. + if n.parent == nil { + // If root node is a branch and only has one node then collapse it. + if !n.isLeaf && len(n.inodes) == 1 { + // Move root's child up. + child := n.bucket.node(n.inodes[0].pgid, n) + n.isLeaf = child.isLeaf + n.inodes = child.inodes[:] + n.children = child.children + + // Reparent all child nodes being moved. 
+ for _, inode := range n.inodes { + if child, ok := n.bucket.nodes[inode.pgid]; ok { + child.parent = n + } + } + + // Remove old child. + child.parent = nil + delete(n.bucket.nodes, child.pgid) + child.free() + } + + return + } + + // If node has no keys then just remove it. + if n.numChildren() == 0 { + n.parent.del(n.key) + n.parent.removeChild(n) + delete(n.bucket.nodes, n.pgid) + n.free() + n.parent.rebalance() + return + } + + _assert(n.parent.numChildren() > 1, "parent must have at least 2 children") + + // Destination node is right sibling if idx == 0, otherwise left sibling. + var target *node + var useNextSibling = (n.parent.childIndex(n) == 0) + if useNextSibling { + target = n.nextSibling() + } else { + target = n.prevSibling() + } + + // If both this node and the target node are too small then merge them. + if useNextSibling { + // Reparent all child nodes being moved. + for _, inode := range target.inodes { + if child, ok := n.bucket.nodes[inode.pgid]; ok { + child.parent.removeChild(child) + child.parent = n + child.parent.children = append(child.parent.children, child) + } + } + + // Copy over inodes from target and remove target. + n.inodes = append(n.inodes, target.inodes...) + n.parent.del(target.key) + n.parent.removeChild(target) + delete(n.bucket.nodes, target.pgid) + target.free() + } else { + // Reparent all child nodes being moved. + for _, inode := range n.inodes { + if child, ok := n.bucket.nodes[inode.pgid]; ok { + child.parent.removeChild(child) + child.parent = target + child.parent.children = append(child.parent.children, child) + } + } + + // Copy over inodes to target and remove node. + target.inodes = append(target.inodes, n.inodes...) + n.parent.del(n.key) + n.parent.removeChild(n) + delete(n.bucket.nodes, n.pgid) + n.free() + } + + // Either this node or the target node was deleted from the parent so rebalance it. + n.parent.rebalance() +} + +// removes a node from the list of in-memory children. +// This does not affect the inodes. +func (n *node) removeChild(target *node) { + for i, child := range n.children { + if child == target { + n.children = append(n.children[:i], n.children[i+1:]...) + return + } + } +} + +// dereference causes the node to copy all its inode key/value references to heap memory. +// This is required when the mmap is reallocated so inodes are not pointing to stale data. +func (n *node) dereference() { + if n.key != nil { + key := make([]byte, len(n.key)) + copy(key, n.key) + n.key = key + _assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node") + } + + for i := range n.inodes { + inode := &n.inodes[i] + + key := make([]byte, len(inode.key)) + copy(key, inode.key) + inode.key = key + _assert(len(inode.key) > 0, "dereference: zero-length inode key") + + value := make([]byte, len(inode.value)) + copy(value, inode.value) + inode.value = value + } + + // Recursively dereference children. + for _, child := range n.children { + child.dereference() + } + + // Update statistics. + n.bucket.tx.stats.NodeDeref++ +} + +// free adds the node's underlying page to the freelist. +func (n *node) free() { + if n.pgid != 0 { + n.bucket.tx.db.freelist.free(n.bucket.tx.meta.txid, n.bucket.tx.page(n.pgid)) + n.pgid = 0 + } +} + +// dump writes the contents of the node to STDERR for debugging purposes. +/* +func (n *node) dump() { + // Write node header. 
+ var typ = "branch" + if n.isLeaf { + typ = "leaf" + } + warnf("[NODE %d {type=%s count=%d}]", n.pgid, typ, len(n.inodes)) + + // Write out abbreviated version of each item. + for _, item := range n.inodes { + if n.isLeaf { + if item.flags&bucketLeafFlag != 0 { + bucket := (*bucket)(unsafe.Pointer(&item.value[0])) + warnf("+L %08x -> (bucket root=%d)", trunc(item.key, 4), bucket.root) + } else { + warnf("+L %08x -> %08x", trunc(item.key, 4), trunc(item.value, 4)) + } + } else { + warnf("+B %08x -> pgid=%d", trunc(item.key, 4), item.pgid) + } + } + warn("") +} +*/ + +type nodes []*node + +func (s nodes) Len() int { return len(s) } +func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s nodes) Less(i, j int) bool { return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1 } + +// inode represents an internal node inside of a node. +// It can be used to point to elements in a page or point +// to an element which hasn't been added to a page yet. +type inode struct { + flags uint32 + pgid pgid + key []byte + value []byte +} + +type inodes []inode diff --git a/vendor/github.com/boltdb/bolt/page.go b/vendor/github.com/coreos/bbolt/page.go similarity index 88% rename from vendor/github.com/boltdb/bolt/page.go rename to vendor/github.com/coreos/bbolt/page.go index 7651a6bf7d99..cde403ae86de 100644 --- a/vendor/github.com/boltdb/bolt/page.go +++ b/vendor/github.com/coreos/bbolt/page.go @@ -145,12 +145,33 @@ func (a pgids) merge(b pgids) pgids { // Return the opposite slice if one is nil. if len(a) == 0 { return b - } else if len(b) == 0 { + } + if len(b) == 0 { return a } + merged := make(pgids, len(a)+len(b)) + mergepgids(merged, a, b) + return merged +} + +// mergepgids copies the sorted union of a and b into dst. +// If dst is too small, it panics. +func mergepgids(dst, a, b pgids) { + if len(dst) < len(a)+len(b) { + panic(fmt.Errorf("mergepgids bad len %d < %d + %d", len(dst), len(a), len(b))) + } + // Copy in the opposite slice if one is nil. + if len(a) == 0 { + copy(dst, b) + return + } + if len(b) == 0 { + copy(dst, a) + return + } - // Create a list to hold all elements from both lists. - merged := make(pgids, 0, len(a)+len(b)) + // Merged will hold all elements from both lists. + merged := dst[:0] // Assign lead to the slice with a lower starting value, follow to the higher value. lead, follow := a, b @@ -172,7 +193,5 @@ func (a pgids) merge(b pgids) pgids { } // Append what's left in follow. - merged = append(merged, follow...) - - return merged + _ = append(merged, follow...) } diff --git a/vendor/github.com/boltdb/bolt/tx.go b/vendor/github.com/coreos/bbolt/tx.go similarity index 94% rename from vendor/github.com/boltdb/bolt/tx.go rename to vendor/github.com/coreos/bbolt/tx.go index 1cfb4cde8555..5c0290733f5f 100644 --- a/vendor/github.com/boltdb/bolt/tx.go +++ b/vendor/github.com/coreos/bbolt/tx.go @@ -126,10 +126,7 @@ func (tx *Tx) DeleteBucket(name []byte) error { // the error is returned to the caller. func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error { return tx.root.ForEach(func(k, v []byte) error { - if err := fn(k, tx.root.Bucket(k)); err != nil { - return err - } - return nil + return fn(k, tx.root.Bucket(k)) }) } @@ -169,28 +166,18 @@ func (tx *Tx) Commit() error { // Free the old root bucket. tx.meta.root.root = tx.root.root - opgid := tx.meta.pgid - - // Free the freelist and allocate new pages for it. This will overestimate - // the size of the freelist but not underestimate the size (which would be bad). 
- tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist)) - p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1) - if err != nil { - tx.rollback() - return err + // Free the old freelist because commit writes out a fresh freelist. + if tx.meta.freelist != pgidNoFreelist { + tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist)) } - if err := tx.db.freelist.write(p); err != nil { - tx.rollback() - return err - } - tx.meta.freelist = p.id - // If the high water mark has moved up then attempt to grow the database. - if tx.meta.pgid > opgid { - if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil { - tx.rollback() + if !tx.db.NoFreelistSync { + err := tx.commitFreelist() + if err != nil { return err } + } else { + tx.meta.freelist = pgidNoFreelist } // Write dirty pages to disk. @@ -235,6 +222,31 @@ func (tx *Tx) Commit() error { return nil } +func (tx *Tx) commitFreelist() error { + // Allocate new pages for the new free list. This will overestimate + // the size of the freelist but not underestimate the size (which would be bad). + opgid := tx.meta.pgid + p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1) + if err != nil { + tx.rollback() + return err + } + if err := tx.db.freelist.write(p); err != nil { + tx.rollback() + return err + } + tx.meta.freelist = p.id + // If the high water mark has moved up then attempt to grow the database. + if tx.meta.pgid > opgid { + if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil { + tx.rollback() + return err + } + } + + return nil +} + // Rollback closes the transaction and ignores all previous updates. Read-only // transactions must be rolled back and not committed. func (tx *Tx) Rollback() error { @@ -305,7 +317,11 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { if err != nil { return 0, err } - defer func() { _ = f.Close() }() + defer func() { + if cerr := f.Close(); err == nil { + err = cerr + } + }() // Generate a meta page. We use the same page data for both meta pages. buf := make([]byte, tx.db.pageSize) @@ -333,7 +349,7 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { } // Move past the meta pages in the file. - if _, err := f.Seek(int64(tx.db.pageSize*2), os.SEEK_SET); err != nil { + if _, err := f.Seek(int64(tx.db.pageSize*2), io.SeekStart); err != nil { return n, fmt.Errorf("seek: %s", err) } @@ -344,7 +360,7 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { return n, err } - return n, f.Close() + return n, nil } // CopyFile copies the entire database to file at the given path. @@ -379,9 +395,14 @@ func (tx *Tx) Check() <-chan error { } func (tx *Tx) check(ch chan error) { + // Force loading free list if opened in ReadOnly mode. + tx.db.loadFreelist() + // Check if any pages are double freed. 
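Two small but real fixes also land in `WriteTo` above: the copy seeks with `io.SeekStart` instead of the deprecated `os.SEEK_SET`, and the file's `Close` error is no longer swallowed by `defer func() { _ = f.Close() }()`. The close-capturing pattern generalizes to any function with a named error result; a minimal sketch:

```go
package main

import (
	"log"
	"os"
)

// writeFile surfaces a Close failure when the writes themselves succeeded,
// using the same defer pattern the patched (*Tx).WriteTo adopts.
func writeFile(path string, data []byte) (err error) {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer func() {
		if cerr := f.Close(); err == nil {
			err = cerr
		}
	}()
	_, err = f.Write(data)
	return err
}

func main() {
	if err := writeFile("/tmp/demo.txt", []byte("hello")); err != nil {
		log.Fatal(err)
	}
}
```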
freed := make(map[pgid]bool) - for _, id := range tx.db.freelist.all() { + all := make([]pgid, tx.db.freelist.count()) + tx.db.freelist.copyall(all) + for _, id := range all { if freed[id] { ch <- fmt.Errorf("page %d: already freed", id) } @@ -392,8 +413,10 @@ func (tx *Tx) check(ch chan error) { reachable := make(map[pgid]*page) reachable[0] = tx.page(0) // meta0 reachable[1] = tx.page(1) // meta1 - for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ { - reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist) + if tx.meta.freelist != pgidNoFreelist { + for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ { + reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist) + } } // Recursively check buckets. @@ -451,7 +474,7 @@ func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bo // allocate returns a contiguous block of memory starting at a given page. func (tx *Tx) allocate(count int) (*page, error) { - p, err := tx.db.allocate(count) + p, err := tx.db.allocate(tx.meta.txid, count) if err != nil { return nil, err } @@ -460,7 +483,7 @@ func (tx *Tx) allocate(count int) (*page, error) { tx.pages[p.id] = p // Update statistics. - tx.stats.PageCount++ + tx.stats.PageCount += count tx.stats.PageAlloc += count * tx.db.pageSize return p, nil diff --git a/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go b/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go index c6e2a12a7faa..009ebda70ca5 100644 --- a/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go +++ b/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go @@ -803,7 +803,7 @@ func init() { proto.RegisterFile("auth.proto", fileDescriptorAuth) } var fileDescriptorAuth = []byte{ // 288 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x6c, 0x90, 0xc1, 0x4a, 0xc3, 0x30, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0xc1, 0x4a, 0xc3, 0x30, 0x1c, 0xc6, 0x9b, 0xb6, 0x1b, 0xed, 0x5f, 0x27, 0x25, 0x0c, 0x0c, 0x13, 0x42, 0xe9, 0xa9, 0x78, 0xa8, 0xb0, 0x5d, 0xbc, 0x2a, 0xf6, 0x20, 0x78, 0x90, 0x50, 0xf1, 0x28, 0x1d, 0x0d, 0x75, 0x6c, 0x6d, 0x4a, 0x32, 0x91, 0xbe, 0x89, 0x07, 0x1f, 0x68, 0xc7, 0x3d, 0x82, 0xab, 0x2f, 0x22, 0x4d, diff --git a/vendor/github.com/coreos/etcd/auth/jwt.go b/vendor/github.com/coreos/etcd/auth/jwt.go new file mode 100644 index 000000000000..214ae48c83a9 --- /dev/null +++ b/vendor/github.com/coreos/etcd/auth/jwt.go @@ -0,0 +1,137 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package auth + +import ( + "crypto/rsa" + "io/ioutil" + + jwt "github.com/dgrijalva/jwt-go" + "golang.org/x/net/context" +) + +type tokenJWT struct { + signMethod string + signKey *rsa.PrivateKey + verifyKey *rsa.PublicKey +} + +func (t *tokenJWT) enable() {} +func (t *tokenJWT) disable() {} +func (t *tokenJWT) invalidateUser(string) {} +func (t *tokenJWT) genTokenPrefix() (string, error) { return "", nil } + +func (t *tokenJWT) info(ctx context.Context, token string, rev uint64) (*AuthInfo, bool) { + // rev isn't used in JWT, it is only used in simple token + var ( + username string + revision uint64 + ) + + parsed, err := jwt.Parse(token, func(token *jwt.Token) (interface{}, error) { + return t.verifyKey, nil + }) + + switch err.(type) { + case nil: + if !parsed.Valid { + plog.Warningf("invalid jwt token: %s", token) + return nil, false + } + + claims := parsed.Claims.(jwt.MapClaims) + + username = claims["username"].(string) + revision = uint64(claims["revision"].(float64)) + default: + plog.Warningf("failed to parse jwt token: %s", err) + return nil, false + } + + return &AuthInfo{Username: username, Revision: revision}, true +} + +func (t *tokenJWT) assign(ctx context.Context, username string, revision uint64) (string, error) { + // Future work: let a jwt token include permission information would be useful for + // permission checking in proxy side. + tk := jwt.NewWithClaims(jwt.GetSigningMethod(t.signMethod), + jwt.MapClaims{ + "username": username, + "revision": revision, + }) + + token, err := tk.SignedString(t.signKey) + if err != nil { + plog.Debugf("failed to sign jwt token: %s", err) + return "", err + } + + plog.Debugf("jwt token: %s", token) + + return token, err +} + +func prepareOpts(opts map[string]string) (jwtSignMethod, jwtPubKeyPath, jwtPrivKeyPath string, err error) { + for k, v := range opts { + switch k { + case "sign-method": + jwtSignMethod = v + case "pub-key": + jwtPubKeyPath = v + case "priv-key": + jwtPrivKeyPath = v + default: + plog.Errorf("unknown token specific option: %s", k) + return "", "", "", ErrInvalidAuthOpts + } + } + + return jwtSignMethod, jwtPubKeyPath, jwtPrivKeyPath, nil +} + +func newTokenProviderJWT(opts map[string]string) (*tokenJWT, error) { + jwtSignMethod, jwtPubKeyPath, jwtPrivKeyPath, err := prepareOpts(opts) + if err != nil { + return nil, ErrInvalidAuthOpts + } + + t := &tokenJWT{} + + t.signMethod = jwtSignMethod + + verifyBytes, err := ioutil.ReadFile(jwtPubKeyPath) + if err != nil { + plog.Errorf("failed to read public key (%s) for jwt: %s", jwtPubKeyPath, err) + return nil, err + } + t.verifyKey, err = jwt.ParseRSAPublicKeyFromPEM(verifyBytes) + if err != nil { + plog.Errorf("failed to parse public key (%s): %s", jwtPubKeyPath, err) + return nil, err + } + + signBytes, err := ioutil.ReadFile(jwtPrivKeyPath) + if err != nil { + plog.Errorf("failed to read private key (%s) for jwt: %s", jwtPrivKeyPath, err) + return nil, err + } + t.signKey, err = jwt.ParseRSAPrivateKeyFromPEM(signBytes) + if err != nil { + plog.Errorf("failed to parse private key (%s): %s", jwtPrivKeyPath, err) + return nil, err + } + + return t, nil +} diff --git a/vendor/github.com/coreos/etcd/auth/range_perm_cache.go b/vendor/github.com/coreos/etcd/auth/range_perm_cache.go index 3cd1ad2a4117..691b65ba38e7 100644 --- a/vendor/github.com/coreos/etcd/auth/range_perm_cache.go +++ b/vendor/github.com/coreos/etcd/auth/range_perm_cache.go @@ -15,93 +15,11 @@ package auth import ( - "bytes" - "sort" - "github.com/coreos/etcd/auth/authpb" 
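The new jwt.go above wires dgrijalva/jwt-go into an RSA-signed token provider configured through the sign-method/pub-key/priv-key options. For illustration, here is a minimal sign-and-verify round trip over the same username/revision claims; it substitutes an HMAC key so the sketch stays self-contained (the provider itself parses RSA PEM key files), and the key and claim values are placeholders:

```go
package main

import (
	"fmt"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	key := []byte("demo-secret") // hypothetical HMAC key; etcd uses RSA key files

	// Sign a token carrying the claims the provider embeds in assign().
	tk := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{
		"username": "alice",
		"revision": uint64(7),
	})
	signed, err := tk.SignedString(key)
	if err != nil {
		panic(err)
	}

	// Verify and recover the claims, mirroring (t *tokenJWT).info.
	parsed, err := jwt.Parse(signed, func(*jwt.Token) (interface{}, error) { return key, nil })
	if err != nil || !parsed.Valid {
		panic("invalid token")
	}
	claims := parsed.Claims.(jwt.MapClaims)
	// JSON numbers decode as float64, hence the conversion back to uint64.
	fmt.Println(claims["username"], uint64(claims["revision"].(float64)))
}
```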
"github.com/coreos/etcd/mvcc/backend" + "github.com/coreos/etcd/pkg/adt" ) -// isSubset returns true if a is a subset of b. -// If a is a prefix of b, then a is a subset of b. -// Given intervals [a1,a2) and [b1,b2), is -// the a interval a subset of b? -func isSubset(a, b *rangePerm) bool { - switch { - case len(a.end) == 0 && len(b.end) == 0: - // a, b are both keys - return bytes.Equal(a.begin, b.begin) - case len(b.end) == 0: - // b is a key, a is a range - return false - case len(a.end) == 0: - // a is a key, b is a range. need b1 <= a1 and a1 < b2 - return bytes.Compare(b.begin, a.begin) <= 0 && bytes.Compare(a.begin, b.end) < 0 - default: - // both are ranges. need b1 <= a1 and a2 <= b2 - return bytes.Compare(b.begin, a.begin) <= 0 && bytes.Compare(a.end, b.end) <= 0 - } -} - -func isRangeEqual(a, b *rangePerm) bool { - return bytes.Equal(a.begin, b.begin) && bytes.Equal(a.end, b.end) -} - -// removeSubsetRangePerms removes any rangePerms that are subsets of other rangePerms. -// If there are equal ranges, removeSubsetRangePerms only keeps one of them. -// It returns a sorted rangePerm slice. -func removeSubsetRangePerms(perms []*rangePerm) (newp []*rangePerm) { - sort.Sort(RangePermSliceByBegin(perms)) - var prev *rangePerm - for i := range perms { - if i == 0 { - prev = perms[i] - newp = append(newp, perms[i]) - continue - } - if isRangeEqual(perms[i], prev) { - continue - } - if isSubset(perms[i], prev) { - continue - } - if isSubset(prev, perms[i]) { - prev = perms[i] - newp[len(newp)-1] = perms[i] - continue - } - prev = perms[i] - newp = append(newp, perms[i]) - } - return newp -} - -// mergeRangePerms merges adjacent rangePerms. -func mergeRangePerms(perms []*rangePerm) []*rangePerm { - var merged []*rangePerm - perms = removeSubsetRangePerms(perms) - - i := 0 - for i < len(perms) { - begin, next := i, i - for next+1 < len(perms) && bytes.Compare(perms[next].end, perms[next+1].begin) >= 0 { - next++ - } - // don't merge ["a", "b") with ["b", ""), because perms[next+1].end is empty. 
- if next != begin && len(perms[next].end) > 0 { - merged = append(merged, &rangePerm{begin: perms[begin].begin, end: perms[next].end}) - } else { - merged = append(merged, perms[begin]) - if next != begin { - merged = append(merged, perms[next]) - } - } - i = next + 1 - } - - return merged -} - func getMergedPerms(tx backend.BatchTx, userName string) *unifiedRangePermissions { user := getUser(tx, userName) if user == nil { @@ -109,7 +27,8 @@ func getMergedPerms(tx backend.BatchTx, userName string) *unifiedRangePermission return nil } - var readPerms, writePerms []*rangePerm + readPerms := &adt.IntervalTree{} + writePerms := &adt.IntervalTree{} for _, roleName := range user.Roles { role := getRole(tx, roleName) @@ -118,48 +37,66 @@ func getMergedPerms(tx backend.BatchTx, userName string) *unifiedRangePermission } for _, perm := range role.KeyPermission { - rp := &rangePerm{begin: perm.Key, end: perm.RangeEnd} + var ivl adt.Interval + var rangeEnd []byte + + if len(perm.RangeEnd) != 1 || perm.RangeEnd[0] != 0 { + rangeEnd = perm.RangeEnd + } + + if len(perm.RangeEnd) != 0 { + ivl = adt.NewBytesAffineInterval(perm.Key, rangeEnd) + } else { + ivl = adt.NewBytesAffinePoint(perm.Key) + } switch perm.PermType { case authpb.READWRITE: - readPerms = append(readPerms, rp) - writePerms = append(writePerms, rp) + readPerms.Insert(ivl, struct{}{}) + writePerms.Insert(ivl, struct{}{}) case authpb.READ: - readPerms = append(readPerms, rp) + readPerms.Insert(ivl, struct{}{}) case authpb.WRITE: - writePerms = append(writePerms, rp) + writePerms.Insert(ivl, struct{}{}) } } } return &unifiedRangePermissions{ - readPerms: mergeRangePerms(readPerms), - writePerms: mergeRangePerms(writePerms), + readPerms: readPerms, + writePerms: writePerms, } } -func checkKeyPerm(cachedPerms *unifiedRangePermissions, key, rangeEnd []byte, permtyp authpb.Permission_Type) bool { - var tocheck []*rangePerm +func checkKeyInterval(cachedPerms *unifiedRangePermissions, key, rangeEnd []byte, permtyp authpb.Permission_Type) bool { + if len(rangeEnd) == 1 && rangeEnd[0] == 0 { + rangeEnd = nil + } + ivl := adt.NewBytesAffineInterval(key, rangeEnd) switch permtyp { case authpb.READ: - tocheck = cachedPerms.readPerms + return cachedPerms.readPerms.Contains(ivl) case authpb.WRITE: - tocheck = cachedPerms.writePerms + return cachedPerms.writePerms.Contains(ivl) default: plog.Panicf("unknown auth type: %v", permtyp) } + return false +} - requiredPerm := &rangePerm{begin: key, end: rangeEnd} - - for _, perm := range tocheck { - if isSubset(requiredPerm, perm) { - return true - } +func checkKeyPoint(cachedPerms *unifiedRangePermissions, key []byte, permtyp authpb.Permission_Type) bool { + pt := adt.NewBytesAffinePoint(key) + switch permtyp { + case authpb.READ: + return cachedPerms.readPerms.Intersects(pt) + case authpb.WRITE: + return cachedPerms.writePerms.Intersects(pt) + default: + plog.Panicf("unknown auth type: %v", permtyp) } - return false } @@ -175,7 +112,11 @@ func (as *authStore) isRangeOpPermitted(tx backend.BatchTx, userName string, key as.rangePermCache[userName] = perms } - return checkKeyPerm(as.rangePermCache[userName], key, rangeEnd, permtyp) + if len(rangeEnd) == 0 { + return checkKeyPoint(as.rangePermCache[userName], key, permtyp) + } + + return checkKeyInterval(as.rangePermCache[userName], key, rangeEnd, permtyp) } func (as *authStore) clearCachedPerm() { @@ -187,35 +128,6 @@ func (as *authStore) invalidateCachedPerm(userName string) { } type unifiedRangePermissions struct { - // readPerms[i] and readPerms[j] (i != j) 
don't overlap - readPerms []*rangePerm - // writePerms[i] and writePerms[j] (i != j) don't overlap, too - writePerms []*rangePerm -} - -type rangePerm struct { - begin, end []byte -} - -type RangePermSliceByBegin []*rangePerm - -func (slice RangePermSliceByBegin) Len() int { - return len(slice) -} - -func (slice RangePermSliceByBegin) Less(i, j int) bool { - switch bytes.Compare(slice[i].begin, slice[j].begin) { - case 0: // begin(i) == begin(j) - return bytes.Compare(slice[i].end, slice[j].end) == -1 - - case -1: // begin(i) < begin(j) - return true - - default: - return false - } -} - -func (slice RangePermSliceByBegin) Swap(i, j int) { - slice[i], slice[j] = slice[j], slice[i] + readPerms *adt.IntervalTree + writePerms *adt.IntervalTree } diff --git a/vendor/github.com/coreos/etcd/auth/simple_token.go b/vendor/github.com/coreos/etcd/auth/simple_token.go index a39f3927685e..94d92a115e24 100644 --- a/vendor/github.com/coreos/etcd/auth/simple_token.go +++ b/vendor/github.com/coreos/etcd/auth/simple_token.go @@ -19,10 +19,14 @@ package auth import ( "crypto/rand" + "fmt" "math/big" + "strconv" "strings" "sync" "time" + + "golang.org/x/net/context" ) const ( @@ -90,24 +94,14 @@ func (tm *simpleTokenTTLKeeper) run() { } } -func (as *authStore) enable() { - delf := func(tk string) { - if username, ok := as.simpleTokens[tk]; ok { - plog.Infof("deleting token %s for user %s", tk, username) - delete(as.simpleTokens, tk) - } - } - as.simpleTokenKeeper = &simpleTokenTTLKeeper{ - tokens: make(map[string]time.Time), - donec: make(chan struct{}), - stopc: make(chan struct{}), - deleteTokenFunc: delf, - mu: &as.simpleTokensMu, - } - go as.simpleTokenKeeper.run() +type tokenSimple struct { + indexWaiter func(uint64) <-chan struct{} + simpleTokenKeeper *simpleTokenTTLKeeper + simpleTokensMu sync.Mutex + simpleTokens map[string]string // token -> username } -func (as *authStore) GenSimpleToken() (string, error) { +func (t *tokenSimple) genTokenPrefix() (string, error) { ret := make([]byte, defaultSimpleTokenLength) for i := 0; i < defaultSimpleTokenLength; i++ { @@ -122,28 +116,105 @@ func (as *authStore) GenSimpleToken() (string, error) { return string(ret), nil } -func (as *authStore) assignSimpleTokenToUser(username, token string) { - as.simpleTokensMu.Lock() - _, ok := as.simpleTokens[token] +func (t *tokenSimple) assignSimpleTokenToUser(username, token string) { + t.simpleTokensMu.Lock() + _, ok := t.simpleTokens[token] if ok { plog.Panicf("token %s is alredy used", token) } - as.simpleTokens[token] = username - as.simpleTokenKeeper.addSimpleToken(token) - as.simpleTokensMu.Unlock() + t.simpleTokens[token] = username + t.simpleTokenKeeper.addSimpleToken(token) + t.simpleTokensMu.Unlock() } -func (as *authStore) invalidateUser(username string) { - if as.simpleTokenKeeper == nil { +func (t *tokenSimple) invalidateUser(username string) { + if t.simpleTokenKeeper == nil { return } - as.simpleTokensMu.Lock() - for token, name := range as.simpleTokens { + t.simpleTokensMu.Lock() + for token, name := range t.simpleTokens { if strings.Compare(name, username) == 0 { - delete(as.simpleTokens, token) - as.simpleTokenKeeper.deleteSimpleToken(token) + delete(t.simpleTokens, token) + t.simpleTokenKeeper.deleteSimpleToken(token) } } - as.simpleTokensMu.Unlock() + t.simpleTokensMu.Unlock() +} + +func (t *tokenSimple) enable() { + delf := func(tk string) { + if username, ok := t.simpleTokens[tk]; ok { + plog.Infof("deleting token %s for user %s", tk, username) + delete(t.simpleTokens, tk) + } + } + 
t.simpleTokenKeeper = &simpleTokenTTLKeeper{ + tokens: make(map[string]time.Time), + donec: make(chan struct{}), + stopc: make(chan struct{}), + deleteTokenFunc: delf, + mu: &t.simpleTokensMu, + } + go t.simpleTokenKeeper.run() +} + +func (t *tokenSimple) disable() { + t.simpleTokensMu.Lock() + tk := t.simpleTokenKeeper + t.simpleTokenKeeper = nil + t.simpleTokens = make(map[string]string) // invalidate all tokens + t.simpleTokensMu.Unlock() + if tk != nil { + tk.stop() + } +} + +func (t *tokenSimple) info(ctx context.Context, token string, revision uint64) (*AuthInfo, bool) { + if !t.isValidSimpleToken(ctx, token) { + return nil, false + } + t.simpleTokensMu.Lock() + username, ok := t.simpleTokens[token] + if ok && t.simpleTokenKeeper != nil { + t.simpleTokenKeeper.resetSimpleToken(token) + } + t.simpleTokensMu.Unlock() + return &AuthInfo{Username: username, Revision: revision}, ok +} + +func (t *tokenSimple) assign(ctx context.Context, username string, rev uint64) (string, error) { + // rev isn't used in simple token, it is only used in JWT + index := ctx.Value("index").(uint64) + simpleToken := ctx.Value("simpleToken").(string) + token := fmt.Sprintf("%s.%d", simpleToken, index) + t.assignSimpleTokenToUser(username, token) + + return token, nil +} + +func (t *tokenSimple) isValidSimpleToken(ctx context.Context, token string) bool { + splitted := strings.Split(token, ".") + if len(splitted) != 2 { + return false + } + index, err := strconv.Atoi(splitted[1]) + if err != nil { + return false + } + + select { + case <-t.indexWaiter(uint64(index)): + return true + case <-ctx.Done(): + } + + return false +} + +func newTokenProviderSimple(indexWaiter func(uint64) <-chan struct{}) *tokenSimple { + return &tokenSimple{ + simpleTokens: make(map[string]string), + indexWaiter: indexWaiter, + } } diff --git a/vendor/github.com/coreos/etcd/auth/store.go b/vendor/github.com/coreos/etcd/auth/store.go index 236bb2c529d4..3fac7f5a6fd7 100644 --- a/vendor/github.com/coreos/etcd/auth/store.go +++ b/vendor/github.com/coreos/etcd/auth/store.go @@ -18,11 +18,10 @@ import ( "bytes" "encoding/binary" "errors" - "fmt" "sort" - "strconv" "strings" "sync" + "sync/atomic" "github.com/coreos/etcd/auth/authpb" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" @@ -30,7 +29,9 @@ import ( "github.com/coreos/pkg/capnslog" "golang.org/x/crypto/bcrypt" "golang.org/x/net/context" + "google.golang.org/grpc/credentials" "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" ) var ( @@ -60,6 +61,8 @@ var ( ErrAuthNotEnabled = errors.New("auth: authentication is not enabled") ErrAuthOldRevision = errors.New("auth: revision in header is old") ErrInvalidAuthToken = errors.New("auth: invalid auth token") + ErrInvalidAuthOpts = errors.New("auth: invalid auth options") + ErrInvalidAuthMgmt = errors.New("auth: invalid auth management") // BcryptCost is the algorithm cost / strength for hashing auth passwords BcryptCost = bcrypt.DefaultCost @@ -129,10 +132,6 @@ type AuthStore interface { // RoleList gets a list of all roles RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) - // AuthInfoFromToken gets a username from the given Token and current revision number - // (The revision number is used for preventing the TOCTOU problem) - AuthInfoFromToken(token string) (*AuthInfo, bool) - // IsPutPermitted checks put permission of the user IsPutPermitted(authInfo *AuthInfo, key []byte) error @@ -145,8 +144,9 @@ type AuthStore interface { // IsAdminPermitted checks admin permission of the user 
IsAdminPermitted(authInfo *AuthInfo) error - // GenSimpleToken produces a simple random string - GenSimpleToken() (string, error) + // GenTokenPrefix produces a random string in a case of simple token + // in a case of JWT, it produces an empty string + GenTokenPrefix() (string, error) // Revision gets current revision of authStore Revision() uint64 @@ -159,33 +159,32 @@ type AuthStore interface { // AuthInfoFromCtx gets AuthInfo from gRPC's context AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error) + + // AuthInfoFromTLS gets AuthInfo from TLS info of gRPC's context + AuthInfoFromTLS(ctx context.Context) *AuthInfo +} + +type TokenProvider interface { + info(ctx context.Context, token string, revision uint64) (*AuthInfo, bool) + assign(ctx context.Context, username string, revision uint64) (string, error) + enable() + disable() + + invalidateUser(string) + genTokenPrefix() (string, error) } type authStore struct { + // atomic operations; need 64-bit align, or 32-bit tests will crash + revision uint64 + be backend.Backend enabled bool enabledMu sync.RWMutex rangePermCache map[string]*unifiedRangePermissions // username -> unifiedRangePermissions - revision uint64 - - // tokenSimple in v3.2+ - indexWaiter func(uint64) <-chan struct{} - simpleTokenKeeper *simpleTokenTTLKeeper - simpleTokensMu sync.Mutex - simpleTokens map[string]string // token -> username -} - -func newDeleterFunc(as *authStore) func(string) { - return func(t string) { - as.simpleTokensMu.Lock() - defer as.simpleTokensMu.Unlock() - if username, ok := as.simpleTokens[t]; ok { - plog.Infof("deleting token %s for user %s", t, username) - delete(as.simpleTokens, t) - } - } + tokenProvider TokenProvider } func (as *authStore) AuthEnable() error { @@ -215,11 +214,11 @@ func (as *authStore) AuthEnable() error { tx.UnsafePut(authBucketName, enableFlagKey, authEnabled) as.enabled = true - as.enable() + as.tokenProvider.enable() as.rangePermCache = make(map[string]*unifiedRangePermissions) - as.revision = getRevision(tx) + as.setRevision(getRevision(tx)) plog.Noticef("Authentication enabled") @@ -241,15 +240,7 @@ func (as *authStore) AuthDisable() { b.ForceCommit() as.enabled = false - - as.simpleTokensMu.Lock() - tk := as.simpleTokenKeeper - as.simpleTokenKeeper = nil - as.simpleTokens = make(map[string]string) // invalidate all tokens - as.simpleTokensMu.Unlock() - if tk != nil { - tk.stop() - } + as.tokenProvider.disable() plog.Noticef("Authentication disabled") } @@ -260,10 +251,7 @@ func (as *authStore) Close() error { if !as.enabled { return nil } - if as.simpleTokenKeeper != nil { - as.simpleTokenKeeper.stop() - as.simpleTokenKeeper = nil - } + as.tokenProvider.disable() return nil } @@ -272,10 +260,6 @@ func (as *authStore) Authenticate(ctx context.Context, username, password string return nil, ErrAuthNotEnabled } - // TODO(mitake): after adding jwt support, branching based on values of ctx is required - index := ctx.Value("index").(uint64) - simpleToken := ctx.Value("simpleToken").(string) - tx := as.be.BatchTx() tx.Lock() defer tx.Unlock() @@ -285,14 +269,23 @@ func (as *authStore) Authenticate(ctx context.Context, username, password string return nil, ErrAuthFailed } - token := fmt.Sprintf("%s.%d", simpleToken, index) - as.assignSimpleTokenToUser(username, token) + // Password checking is already performed in the API layer, so we don't need to check for now. + // Staleness of password can be detected with OCC in the API layer, too. 
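With the `TokenProvider` interface above, `authStore` no longer hard-codes simple tokens: enable/disable, token assignment, and lookup all flow through the provider, which is how the simple and JWT implementations coexist. Since the interface's methods are unexported, this sketch mirrors it locally with a hypothetical static provider, purely to show the shape of the contract:

```go
package main

import (
	"context"
	"fmt"
)

// authInfo stands in for etcd's auth.AuthInfo in this sketch.
type authInfo struct {
	Username string
	Revision uint64
}

// tokenProvider locally mirrors the unexported interface the diff introduces.
type tokenProvider interface {
	info(ctx context.Context, token string, revision uint64) (*authInfo, bool)
	assign(ctx context.Context, username string, revision uint64) (string, error)
	enable()
	disable()
	invalidateUser(string)
	genTokenPrefix() (string, error)
}

// staticProvider is a toy provider that hands out deterministic tokens.
type staticProvider struct{ tokens map[string]string } // token -> username

func (p *staticProvider) info(_ context.Context, tok string, rev uint64) (*authInfo, bool) {
	user, ok := p.tokens[tok]
	return &authInfo{Username: user, Revision: rev}, ok
}

func (p *staticProvider) assign(_ context.Context, user string, _ uint64) (string, error) {
	tok := "tok-" + user
	p.tokens[tok] = user
	return tok, nil
}

func (p *staticProvider) enable()                         {}
func (p *staticProvider) disable()                        {}
func (p *staticProvider) invalidateUser(string)           {}
func (p *staticProvider) genTokenPrefix() (string, error) { return "", nil }

func main() {
	var tp tokenProvider = &staticProvider{tokens: map[string]string{}}
	tok, _ := tp.assign(context.Background(), "alice", 1)
	ai, ok := tp.info(context.Background(), tok, 1)
	fmt.Println(tok, ai.Username, ok)
}
```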
+ + token, err := as.tokenProvider.assign(ctx, username, as.Revision()) + if err != nil { + return nil, err + } - plog.Infof("authorized %s, token is %s", username, token) + plog.Debugf("authorized %s, token is %s", username, token) return &pb.AuthenticateResponse{Token: token}, nil } func (as *authStore) CheckPassword(username, password string) (uint64, error) { + if !as.isAuthEnabled() { + return 0, ErrAuthNotEnabled + } + tx := as.be.BatchTx() tx.Lock() defer tx.Unlock() @@ -322,7 +315,7 @@ func (as *authStore) Recover(be backend.Backend) { } } - as.revision = getRevision(tx) + as.setRevision(getRevision(tx)) tx.Unlock() @@ -366,6 +359,11 @@ func (as *authStore) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, } func (as *authStore) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) { + if as.enabled && strings.Compare(r.Name, rootUser) == 0 { + plog.Errorf("the user root must not be deleted") + return nil, ErrInvalidAuthMgmt + } + tx := as.be.BatchTx() tx.Lock() defer tx.Unlock() @@ -380,7 +378,7 @@ func (as *authStore) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDelete as.commitRevision(tx) as.invalidateCachedPerm(r.Name) - as.invalidateUser(r.Name) + as.tokenProvider.invalidateUser(r.Name) plog.Noticef("deleted a user: %s", r.Name) @@ -416,7 +414,7 @@ func (as *authStore) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*p as.commitRevision(tx) as.invalidateCachedPerm(r.Name) - as.invalidateUser(r.Name) + as.tokenProvider.invalidateUser(r.Name) plog.Noticef("changed a password of a user: %s", r.Name) @@ -491,6 +489,11 @@ func (as *authStore) UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListRespon } func (as *authStore) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) { + if as.enabled && strings.Compare(r.Name, rootUser) == 0 && strings.Compare(r.Role, rootRole) == 0 { + plog.Errorf("the role root must not be revoked from the user root") + return nil, ErrInvalidAuthMgmt + } + tx := as.be.BatchTx() tx.Lock() defer tx.Unlock() @@ -593,17 +596,10 @@ func (as *authStore) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) } func (as *authStore) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) { - // TODO(mitake): current scheme of role deletion allows existing users to have the deleted roles - // - // Assume a case like below: - // create a role r1 - // create a user u1 and grant r1 to u1 - // delete r1 - // - // After this sequence, u1 is still granted the role r1. So if admin create a new role with the name r1, - // the new r1 is automatically granted u1. - // In some cases, it would be confusing. So we need to provide an option for deleting the grant relation - // from all users. 
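The deleted TODO above describes the stale-grant problem: deleting a role used to leave it attached to every user who held it. The hunk that follows resolves this by rewriting each user that still references the deleted role. The per-user filtering reduces to a simple slice scrub; a sketch, not etcd's exact code:

```go
package main

import "fmt"

// withoutRole returns the user's roles minus the deleted one,
// mirroring the filtering loop RoleDelete now performs per user.
func withoutRole(roles []string, deleted string) []string {
	kept := make([]string, 0, len(roles))
	for _, r := range roles {
		if r != deleted {
			kept = append(kept, r)
		}
	}
	return kept
}

func main() {
	fmt.Println(withoutRole([]string{"r1", "admin", "r1"}, "r1")) // [admin]
}
```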
+ if as.enabled && strings.Compare(r.Role, rootRole) == 0 { + plog.Errorf("the role root must not be deleted") + return nil, ErrInvalidAuthMgmt + } tx := as.be.BatchTx() tx.Lock() @@ -616,6 +612,28 @@ func (as *authStore) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDelete delRole(tx, r.Role) + users := getAllUsers(tx) + for _, user := range users { + updatedUser := &authpb.User{ + Name: user.Name, + Password: user.Password, + } + + for _, role := range user.Roles { + if strings.Compare(role, r.Role) != 0 { + updatedUser.Roles = append(updatedUser.Roles, role) + } + } + + if len(updatedUser.Roles) == len(user.Roles) { + continue + } + + putUser(tx, updatedUser) + + as.invalidateCachedPerm(string(user.Name)) + } + as.commitRevision(tx) plog.Noticef("deleted role %s", r.Role) @@ -645,15 +663,8 @@ func (as *authStore) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, return &pb.AuthRoleAddResponse{}, nil } -func (as *authStore) AuthInfoFromToken(token string) (*AuthInfo, bool) { - // same as '(t *tokenSimple) info' in v3.2+ - as.simpleTokensMu.Lock() - username, ok := as.simpleTokens[token] - if ok && as.simpleTokenKeeper != nil { - as.simpleTokenKeeper.resetSimpleToken(token) - } - as.simpleTokensMu.Unlock() - return &AuthInfo{Username: username, Revision: as.revision}, ok +func (as *authStore) authInfoFromToken(ctx context.Context, token string) (*AuthInfo, bool) { + return as.tokenProvider.info(ctx, token, as.Revision()) } type permSlice []*authpb.Permission @@ -723,7 +734,7 @@ func (as *authStore) isOpPermitted(userName string, revision uint64, key, rangeE return ErrUserEmpty } - if revision < as.revision { + if revision < as.Revision() { return ErrAuthOldRevision } @@ -886,7 +897,7 @@ func (as *authStore) isAuthEnabled() bool { return as.enabled } -func NewAuthStore(be backend.Backend, indexWaiter func(uint64) <-chan struct{}) *authStore { +func NewAuthStore(be backend.Backend, tp TokenProvider) *authStore { tx := be.BatchTx() tx.Lock() @@ -904,18 +915,17 @@ func NewAuthStore(be backend.Backend, indexWaiter func(uint64) <-chan struct{}) as := &authStore{ be: be, - simpleTokens: make(map[string]string), revision: getRevision(tx), - indexWaiter: indexWaiter, enabled: enabled, rangePermCache: make(map[string]*unifiedRangePermissions), + tokenProvider: tp, } if enabled { - as.enable() + as.tokenProvider.enable() } - if as.revision == 0 { + if as.Revision() == 0 { as.commitRevision(tx) } @@ -935,9 +945,9 @@ func hasRootRole(u *authpb.User) bool { } func (as *authStore) commitRevision(tx backend.BatchTx) { - as.revision++ + atomic.AddUint64(&as.revision, 1) revBytes := make([]byte, revBytesLen) - binary.BigEndian.PutUint64(revBytes, as.revision) + binary.BigEndian.PutUint64(revBytes, as.Revision()) tx.UnsafePut(authBucketName, revisionKey, revBytes) } @@ -951,31 +961,38 @@ func getRevision(tx backend.BatchTx) uint64 { return binary.BigEndian.Uint64(vs[0]) } +func (as *authStore) setRevision(rev uint64) { + atomic.StoreUint64(&as.revision, rev) +} + func (as *authStore) Revision() uint64 { - return as.revision + return atomic.LoadUint64(&as.revision) } -func (as *authStore) isValidSimpleToken(token string, ctx context.Context) bool { - splitted := strings.Split(token, ".") - if len(splitted) != 2 { - return false - } - index, err := strconv.Atoi(splitted[1]) - if err != nil { - return false +func (as *authStore) AuthInfoFromTLS(ctx context.Context) *AuthInfo { + peer, ok := peer.FromContext(ctx) + if !ok || peer == nil || peer.AuthInfo == nil { + return nil } - select { - case 
<-as.indexWaiter(uint64(index)): - return true - case <-ctx.Done(): + tlsInfo := peer.AuthInfo.(credentials.TLSInfo) + for _, chains := range tlsInfo.State.VerifiedChains { + for _, chain := range chains { + cn := chain.Subject.CommonName + plog.Debugf("found common name %s", cn) + + return &AuthInfo{ + Username: cn, + Revision: as.Revision(), + } + } } - return false + return nil } func (as *authStore) AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error) { - md, ok := metadata.FromContext(ctx) + md, ok := metadata.FromIncomingContext(ctx) if !ok { return nil, nil } @@ -986,14 +1003,57 @@ func (as *authStore) AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error) { } token := ts[0] - if !as.isValidSimpleToken(token, ctx) { - return nil, ErrInvalidAuthToken - } - - authInfo, uok := as.AuthInfoFromToken(token) + authInfo, uok := as.authInfoFromToken(ctx, token) if !uok { plog.Warningf("invalid auth token: %s", token) return nil, ErrInvalidAuthToken } return authInfo, nil } + +func (as *authStore) GenTokenPrefix() (string, error) { + return as.tokenProvider.genTokenPrefix() +} + +func decomposeOpts(optstr string) (string, map[string]string, error) { + opts := strings.Split(optstr, ",") + tokenType := opts[0] + + typeSpecificOpts := make(map[string]string) + for i := 1; i < len(opts); i++ { + pair := strings.Split(opts[i], "=") + + if len(pair) != 2 { + plog.Errorf("invalid token specific option: %s", optstr) + return "", nil, ErrInvalidAuthOpts + } + + if _, ok := typeSpecificOpts[pair[0]]; ok { + plog.Errorf("invalid token specific option, duplicated parameters (%s): %s", pair[0], optstr) + return "", nil, ErrInvalidAuthOpts + } + + typeSpecificOpts[pair[0]] = pair[1] + } + + return tokenType, typeSpecificOpts, nil + +} + +func NewTokenProvider(tokenOpts string, indexWaiter func(uint64) <-chan struct{}) (TokenProvider, error) { + tokenType, typeSpecificOpts, err := decomposeOpts(tokenOpts) + if err != nil { + return nil, ErrInvalidAuthOpts + } + + switch tokenType { + case "simple": + plog.Warningf("simple token is not cryptographically signed") + return newTokenProviderSimple(indexWaiter), nil + case "jwt": + return newTokenProviderJWT(typeSpecificOpts) + default: + plog.Errorf("unknown token type: %s", tokenType) + return nil, ErrInvalidAuthOpts + } +} diff --git a/vendor/github.com/coreos/etcd/client/client.go b/vendor/github.com/coreos/etcd/client/client.go index f9131b4725c1..19ce2ec01dad 100644 --- a/vendor/github.com/coreos/etcd/client/client.go +++ b/vendor/github.com/coreos/etcd/client/client.go @@ -15,6 +15,7 @@ package client import ( + "encoding/json" "errors" "fmt" "io/ioutil" @@ -27,6 +28,8 @@ import ( "sync" "time" + "github.com/coreos/etcd/version" + "golang.org/x/net/context" ) @@ -201,6 +204,9 @@ type Client interface { // returned SetEndpoints(eps []string) error + // GetVersion retrieves the current etcd server and cluster version + GetVersion(ctx context.Context) (*version.Versions, error) + httpClient } @@ -366,12 +372,7 @@ func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Respo if err == context.Canceled || err == context.DeadlineExceeded { return nil, nil, err } - if isOneShot { - return nil, nil, err - } - continue - } - if resp.StatusCode/100 == 5 { + } else if resp.StatusCode/100 == 5 { switch resp.StatusCode { case http.StatusInternalServerError, http.StatusServiceUnavailable: // TODO: make sure this is a no leader response @@ -379,10 +380,16 @@ func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Respo default: 
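`NewTokenProvider` above selects between the simple and JWT providers from an option string such as `"jwt,sign-method=RS256,pub-key=..."`, with `decomposeOpts` splitting off the type-specific key/value pairs. A small standalone version of that parsing, keeping the same duplicate and malformed-pair checks:

```go
package main

import (
	"fmt"
	"strings"
)

// parseTokenOpts mirrors decomposeOpts: "jwt,sign-method=RS256,pub-key=/p"
// becomes ("jwt", map[sign-method:RS256 pub-key:/p]).
func parseTokenOpts(optstr string) (string, map[string]string, error) {
	parts := strings.Split(optstr, ",")
	typ := parts[0]
	opts := make(map[string]string)
	for _, p := range parts[1:] {
		kv := strings.Split(p, "=")
		if len(kv) != 2 {
			return "", nil, fmt.Errorf("invalid token specific option: %q", p)
		}
		if _, dup := opts[kv[0]]; dup {
			return "", nil, fmt.Errorf("duplicated option: %q", kv[0])
		}
		opts[kv[0]] = kv[1]
	}
	return typ, opts, nil
}

func main() {
	typ, opts, err := parseTokenOpts("jwt,sign-method=RS256,pub-key=/etc/pub.pem")
	fmt.Println(typ, opts, err)
}
```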
cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s returns server error [%s]", eps[k].String(), http.StatusText(resp.StatusCode))) } - if isOneShot { - return nil, nil, cerr.Errors[0] + err = cerr.Errors[0] + } + if err != nil { + if !isOneShot { + continue } - continue + c.Lock() + c.pinned = (k + 1) % leps + c.Unlock() + return nil, nil, err } if k != pinned { c.Lock() @@ -477,6 +484,33 @@ func (c *httpClusterClient) AutoSync(ctx context.Context, interval time.Duration } } +func (c *httpClusterClient) GetVersion(ctx context.Context) (*version.Versions, error) { + act := &getAction{Prefix: "/version"} + + resp, body, err := c.Do(ctx, act) + if err != nil { + return nil, err + } + + switch resp.StatusCode { + case http.StatusOK: + if len(body) == 0 { + return nil, ErrEmptyBody + } + var vresp version.Versions + if err := json.Unmarshal(body, &vresp); err != nil { + return nil, ErrInvalidJSON + } + return &vresp, nil + default: + var etcdErr Error + if err := json.Unmarshal(body, &etcdErr); err != nil { + return nil, ErrInvalidJSON + } + return nil, etcdErr + } +} + type roundTripResponse struct { resp *http.Response err error diff --git a/vendor/github.com/coreos/etcd/client/discover.go b/vendor/github.com/coreos/etcd/client/discover.go index bfd7aec93f5c..442e35fe543b 100644 --- a/vendor/github.com/coreos/etcd/client/discover.go +++ b/vendor/github.com/coreos/etcd/client/discover.go @@ -14,8 +14,27 @@ package client +import ( + "github.com/coreos/etcd/pkg/srv" +) + // Discoverer is an interface that wraps the Discover method. type Discoverer interface { // Discover looks up the etcd servers for the domain. Discover(domain string) ([]string, error) } + +type srvDiscover struct{} + +// NewSRVDiscover constructs a new Discoverer that uses the stdlib to lookup SRV records. +func NewSRVDiscover() Discoverer { + return &srvDiscover{} +} + +func (d *srvDiscover) Discover(domain string) ([]string, error) { + srvs, err := srv.GetClient("etcd-client", domain) + if err != nil { + return nil, err + } + return srvs.Endpoints, nil +} diff --git a/vendor/github.com/coreos/etcd/client/srv.go b/vendor/github.com/coreos/etcd/client/srv.go deleted file mode 100644 index fdfa34359219..000000000000 --- a/vendor/github.com/coreos/etcd/client/srv.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - "fmt" - "net" - "net/url" -) - -var ( - // indirection for testing - lookupSRV = net.LookupSRV -) - -type srvDiscover struct{} - -// NewSRVDiscover constructs a new Discoverer that uses the stdlib to lookup SRV records. -func NewSRVDiscover() Discoverer { - return &srvDiscover{} -} - -// Discover looks up the etcd servers for the domain. 
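`GetVersion` above is new on the v2 client: it issues a plain GET against `/version` and decodes the body into `version.Versions`. Decoding the same document standalone, assuming etcd's `etcdserver`/`etcdcluster` JSON keys:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// versions mirrors the shape of etcd's version.Versions payload.
type versions struct {
	Server  string `json:"etcdserver"`
	Cluster string `json:"etcdcluster"`
}

func main() {
	body := []byte(`{"etcdserver":"3.2.13","etcdcluster":"3.2.0"}`)
	var v versions
	if err := json.Unmarshal(body, &v); err != nil {
		panic(err)
	}
	fmt.Println(v.Server, v.Cluster)
}
```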
-func (d *srvDiscover) Discover(domain string) ([]string, error) { - var urls []*url.URL - - updateURLs := func(service, scheme string) error { - _, addrs, err := lookupSRV(service, "tcp", domain) - if err != nil { - return err - } - for _, srv := range addrs { - urls = append(urls, &url.URL{ - Scheme: scheme, - Host: net.JoinHostPort(srv.Target, fmt.Sprintf("%d", srv.Port)), - }) - } - return nil - } - - errHTTPS := updateURLs("etcd-client-ssl", "https") - errHTTP := updateURLs("etcd-client", "http") - - if errHTTPS != nil && errHTTP != nil { - return nil, fmt.Errorf("dns lookup errors: %s and %s", errHTTPS, errHTTP) - } - - endpoints := make([]string, len(urls)) - for i := range urls { - endpoints[i] = urls[i].String() - } - return endpoints, nil -} diff --git a/vendor/github.com/coreos/etcd/clientv3/README.md b/vendor/github.com/coreos/etcd/clientv3/README.md index 87c32d1a88a3..376bfba7614d 100644 --- a/vendor/github.com/coreos/etcd/clientv3/README.md +++ b/vendor/github.com/coreos/etcd/clientv3/README.md @@ -1,6 +1,6 @@ # etcd/clientv3 -[![Godoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/github.com/coreos/etcd/clientv3) +[![Godoc](https://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/github.com/coreos/etcd/clientv3) `etcd/clientv3` is the official Go etcd client for v3. @@ -32,7 +32,7 @@ pass `context.WithTimeout` to APIs: ```go ctx, cancel := context.WithTimeout(context.Background(), timeout) -resp, err := kvc.Put(ctx, "sample_key", "sample_value") +resp, err := cli.Put(ctx, "sample_key", "sample_value") cancel() if err != nil { // handle error! @@ -57,7 +57,7 @@ etcd client returns 2 types of errors: Here is the example code to handle client errors: ```go -resp, err := kvc.Put(ctx, "", "") +resp, err := cli.Put(ctx, "", "") if err != nil { switch err { case context.Canceled: @@ -76,6 +76,10 @@ if err != nil { The etcd client optionally exposes RPC metrics through [go-grpc-prometheus](https://github.com/grpc-ecosystem/go-grpc-prometheus). See the [examples](https://github.com/coreos/etcd/blob/master/clientv3/example_metrics_test.go). +## Namespacing + +The [namespace](https://godoc.org/github.com/coreos/etcd/clientv3/namespace) package provides `clientv3` interface wrappers to transparently isolate client requests to a user-defined prefix. + ## Examples More code examples can be found at [GoDoc](https://godoc.org/github.com/coreos/etcd/clientv3). 
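The README's new Namespacing section points at the clientv3/namespace package: wrapping a client's KV, Watcher, and Lease confines every request under a caller-chosen prefix. A short usage sketch, where the endpoint and prefix are placeholders:

```go
package main

import (
	"log"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/clientv3/namespace"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Every request through cli now lives under the "my-app/" prefix.
	cli.KV = namespace.NewKV(cli.KV, "my-app/")
	cli.Watcher = namespace.NewWatcher(cli.Watcher, "my-app/")
	cli.Lease = namespace.NewLease(cli.Lease, "my-app/")
}
```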
diff --git a/vendor/github.com/coreos/etcd/clientv3/auth.go b/vendor/github.com/coreos/etcd/clientv3/auth.go index b995bce8e3f2..a64b8caca895 100644 --- a/vendor/github.com/coreos/etcd/clientv3/auth.go +++ b/vendor/github.com/coreos/etcd/clientv3/auth.go @@ -20,6 +20,7 @@ import ( "github.com/coreos/etcd/auth/authpb" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "golang.org/x/net/context" "google.golang.org/grpc" ) @@ -100,68 +101,65 @@ type Auth interface { } type auth struct { - c *Client - - conn *grpc.ClientConn // conn in-use - remote pb.AuthClient + remote pb.AuthClient + callOpts []grpc.CallOption } func NewAuth(c *Client) Auth { - conn := c.ActiveConnection() - return &auth{ - conn: c.ActiveConnection(), - remote: pb.NewAuthClient(conn), - c: c, + api := &auth{remote: RetryAuthClient(c)} + if c != nil { + api.callOpts = c.callOpts } + return api } func (auth *auth) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) { - resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, grpc.FailFast(false)) + resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, auth.callOpts...) return (*AuthEnableResponse)(resp), toErr(ctx, err) } func (auth *auth) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) { - resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, grpc.FailFast(false)) + resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, auth.callOpts...) return (*AuthDisableResponse)(resp), toErr(ctx, err) } func (auth *auth) UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) { - resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password}) + resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password}, auth.callOpts...) return (*AuthUserAddResponse)(resp), toErr(ctx, err) } func (auth *auth) UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) { - resp, err := auth.remote.UserDelete(ctx, &pb.AuthUserDeleteRequest{Name: name}) + resp, err := auth.remote.UserDelete(ctx, &pb.AuthUserDeleteRequest{Name: name}, auth.callOpts...) return (*AuthUserDeleteResponse)(resp), toErr(ctx, err) } func (auth *auth) UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) { - resp, err := auth.remote.UserChangePassword(ctx, &pb.AuthUserChangePasswordRequest{Name: name, Password: password}) + resp, err := auth.remote.UserChangePassword(ctx, &pb.AuthUserChangePasswordRequest{Name: name, Password: password}, auth.callOpts...) return (*AuthUserChangePasswordResponse)(resp), toErr(ctx, err) } func (auth *auth) UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error) { - resp, err := auth.remote.UserGrantRole(ctx, &pb.AuthUserGrantRoleRequest{User: user, Role: role}) + resp, err := auth.remote.UserGrantRole(ctx, &pb.AuthUserGrantRoleRequest{User: user, Role: role}, auth.callOpts...) return (*AuthUserGrantRoleResponse)(resp), toErr(ctx, err) } func (auth *auth) UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) { - resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}, grpc.FailFast(false)) + resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}, auth.callOpts...) 
return (*AuthUserGetResponse)(resp), toErr(ctx, err) } func (auth *auth) UserList(ctx context.Context) (*AuthUserListResponse, error) { - resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}, grpc.FailFast(false)) + resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}, auth.callOpts...) return (*AuthUserListResponse)(resp), toErr(ctx, err) } func (auth *auth) UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error) { - resp, err := auth.remote.UserRevokeRole(ctx, &pb.AuthUserRevokeRoleRequest{Name: name, Role: role}) + resp, err := auth.remote.UserRevokeRole(ctx, &pb.AuthUserRevokeRoleRequest{Name: name, Role: role}, auth.callOpts...) return (*AuthUserRevokeRoleResponse)(resp), toErr(ctx, err) } func (auth *auth) RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) { - resp, err := auth.remote.RoleAdd(ctx, &pb.AuthRoleAddRequest{Name: name}) + resp, err := auth.remote.RoleAdd(ctx, &pb.AuthRoleAddRequest{Name: name}, auth.callOpts...) return (*AuthRoleAddResponse)(resp), toErr(ctx, err) } @@ -171,27 +169,27 @@ func (auth *auth) RoleGrantPermission(ctx context.Context, name string, key, ran RangeEnd: []byte(rangeEnd), PermType: authpb.Permission_Type(permType), } - resp, err := auth.remote.RoleGrantPermission(ctx, &pb.AuthRoleGrantPermissionRequest{Name: name, Perm: perm}) + resp, err := auth.remote.RoleGrantPermission(ctx, &pb.AuthRoleGrantPermissionRequest{Name: name, Perm: perm}, auth.callOpts...) return (*AuthRoleGrantPermissionResponse)(resp), toErr(ctx, err) } func (auth *auth) RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) { - resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}, grpc.FailFast(false)) + resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}, auth.callOpts...) return (*AuthRoleGetResponse)(resp), toErr(ctx, err) } func (auth *auth) RoleList(ctx context.Context) (*AuthRoleListResponse, error) { - resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}, grpc.FailFast(false)) + resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}, auth.callOpts...) return (*AuthRoleListResponse)(resp), toErr(ctx, err) } func (auth *auth) RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) { - resp, err := auth.remote.RoleRevokePermission(ctx, &pb.AuthRoleRevokePermissionRequest{Role: role, Key: key, RangeEnd: rangeEnd}) + resp, err := auth.remote.RoleRevokePermission(ctx, &pb.AuthRoleRevokePermissionRequest{Role: role, Key: key, RangeEnd: rangeEnd}, auth.callOpts...) return (*AuthRoleRevokePermissionResponse)(resp), toErr(ctx, err) } func (auth *auth) RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) { - resp, err := auth.remote.RoleDelete(ctx, &pb.AuthRoleDeleteRequest{Role: role}) + resp, err := auth.remote.RoleDelete(ctx, &pb.AuthRoleDeleteRequest{Role: role}, auth.callOpts...) 
return (*AuthRoleDeleteResponse)(resp), toErr(ctx, err) } @@ -204,12 +202,13 @@ func StrToPermissionType(s string) (PermissionType, error) { } type authenticator struct { - conn *grpc.ClientConn // conn in-use - remote pb.AuthClient + conn *grpc.ClientConn // conn in-use + remote pb.AuthClient + callOpts []grpc.CallOption } func (auth *authenticator) authenticate(ctx context.Context, name string, password string) (*AuthenticateResponse, error) { - resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password}, grpc.FailFast(false)) + resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password}, auth.callOpts...) return (*AuthenticateResponse)(resp), toErr(ctx, err) } @@ -217,14 +216,18 @@ func (auth *authenticator) close() { auth.conn.Close() } -func newAuthenticator(endpoint string, opts []grpc.DialOption) (*authenticator, error) { +func newAuthenticator(endpoint string, opts []grpc.DialOption, c *Client) (*authenticator, error) { conn, err := grpc.Dial(endpoint, opts...) if err != nil { return nil, err } - return &authenticator{ + api := &authenticator{ conn: conn, remote: pb.NewAuthClient(conn), - }, nil + } + if c != nil { + api.callOpts = c.callOpts + } + return api, nil } diff --git a/vendor/github.com/coreos/etcd/clientv3/balancer.go b/vendor/github.com/coreos/etcd/clientv3/balancer.go deleted file mode 100644 index 0fef9c54934f..000000000000 --- a/vendor/github.com/coreos/etcd/clientv3/balancer.go +++ /dev/null @@ -1,239 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package clientv3 - -import ( - "net/url" - "strings" - "sync" - - "golang.org/x/net/context" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" -) - -// ErrNoAddrAvilable is returned by Get() when the balancer does not have -// any active connection to endpoints at the time. -// This error is returned only when opts.BlockingWait is true. -var ErrNoAddrAvilable = grpc.Errorf(codes.Unavailable, "there is no address available") - -// simpleBalancer does the bare minimum to expose multiple eps -// to the grpc reconnection code path -type simpleBalancer struct { - // addrs are the client's endpoints for grpc - addrs []grpc.Address - // notifyCh notifies grpc of the set of addresses for connecting - notifyCh chan []grpc.Address - - // readyc closes once the first connection is up - readyc chan struct{} - readyOnce sync.Once - - // mu protects upEps, pinAddr, and connectingAddr - mu sync.RWMutex - // upEps holds the current endpoints that have an active connection - upEps map[string]struct{} - // upc closes when upEps transitions from empty to non-zero or the balancer closes. - upc chan struct{} - - // grpc issues TLS cert checks using the string passed into dial so - // that string must be the host. To recover the full scheme://host URL, - // have a map from hosts to the original endpoint. 
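Every auth RPC above now forwards `auth.callOpts` instead of sprinkling `grpc.FailFast(false)` at individual call sites, so fail-fast and retry policy is configured once when the client is built. The mechanism is nothing more than a shared variadic tail; a sketch using only grpc:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc"
)

func main() {
	// One shared slice of call options, populated at client construction...
	callOpts := []grpc.CallOption{grpc.FailFast(false)}

	// ...and passed by every generated-stub call as the trailing variadic:
	//   resp, err := remote.UserGet(ctx, req, callOpts...)
	fmt.Printf("%d shared call option(s)\n", len(callOpts))
}
```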
- host2ep map[string]string - - // pinAddr is the currently pinned address; set to the empty string on - // intialization and shutdown. - pinAddr string - - closed bool -} - -func newSimpleBalancer(eps []string) *simpleBalancer { - notifyCh := make(chan []grpc.Address, 1) - addrs := make([]grpc.Address, len(eps)) - for i := range eps { - addrs[i].Addr = getHost(eps[i]) - } - notifyCh <- addrs - sb := &simpleBalancer{ - addrs: addrs, - notifyCh: notifyCh, - readyc: make(chan struct{}), - upEps: make(map[string]struct{}), - upc: make(chan struct{}), - host2ep: getHost2ep(eps), - } - return sb -} - -func (b *simpleBalancer) Start(target string, config grpc.BalancerConfig) error { return nil } - -func (b *simpleBalancer) ConnectNotify() <-chan struct{} { - b.mu.Lock() - defer b.mu.Unlock() - return b.upc -} - -func (b *simpleBalancer) getEndpoint(host string) string { - b.mu.Lock() - defer b.mu.Unlock() - return b.host2ep[host] -} - -func getHost2ep(eps []string) map[string]string { - hm := make(map[string]string, len(eps)) - for i := range eps { - _, host, _ := parseEndpoint(eps[i]) - hm[host] = eps[i] - } - return hm -} - -func (b *simpleBalancer) updateAddrs(eps []string) { - np := getHost2ep(eps) - - b.mu.Lock() - defer b.mu.Unlock() - - match := len(np) == len(b.host2ep) - for k, v := range np { - if b.host2ep[k] != v { - match = false - break - } - } - if match { - // same endpoints, so no need to update address - return - } - - b.host2ep = np - - addrs := make([]grpc.Address, 0, len(eps)) - for i := range eps { - addrs = append(addrs, grpc.Address{Addr: getHost(eps[i])}) - } - b.addrs = addrs - b.notifyCh <- addrs -} - -func (b *simpleBalancer) Up(addr grpc.Address) func(error) { - b.mu.Lock() - defer b.mu.Unlock() - - // gRPC might call Up after it called Close. We add this check - // to "fix" it up at application layer. Or our simplerBalancer - // might panic since b.upc is closed. - if b.closed { - return func(err error) {} - } - - if len(b.upEps) == 0 { - // notify waiting Get()s and pin first connected address - close(b.upc) - b.pinAddr = addr.Addr - } - b.upEps[addr.Addr] = struct{}{} - - // notify client that a connection is up - b.readyOnce.Do(func() { close(b.readyc) }) - - return func(err error) { - b.mu.Lock() - delete(b.upEps, addr.Addr) - if len(b.upEps) == 0 && b.pinAddr != "" { - b.upc = make(chan struct{}) - } else if b.pinAddr == addr.Addr { - // choose new random up endpoint - for k := range b.upEps { - b.pinAddr = k - break - } - } - b.mu.Unlock() - } -} - -func (b *simpleBalancer) Get(ctx context.Context, opts grpc.BalancerGetOptions) (grpc.Address, func(), error) { - var addr string - - // If opts.BlockingWait is false (for fail-fast RPCs), it should return - // an address it has notified via Notify immediately instead of blocking. 
- if !opts.BlockingWait { - b.mu.RLock() - closed := b.closed - addr = b.pinAddr - upEps := len(b.upEps) - b.mu.RUnlock() - if closed { - return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing - } - - if upEps == 0 { - return grpc.Address{Addr: ""}, nil, ErrNoAddrAvilable - } - return grpc.Address{Addr: addr}, func() {}, nil - } - - for { - b.mu.RLock() - ch := b.upc - b.mu.RUnlock() - select { - case <-ch: - case <-ctx.Done(): - return grpc.Address{Addr: ""}, nil, ctx.Err() - } - b.mu.RLock() - addr = b.pinAddr - upEps := len(b.upEps) - b.mu.RUnlock() - if addr == "" { - return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing - } - if upEps > 0 { - break - } - } - return grpc.Address{Addr: addr}, func() {}, nil -} - -func (b *simpleBalancer) Notify() <-chan []grpc.Address { return b.notifyCh } - -func (b *simpleBalancer) Close() error { - b.mu.Lock() - defer b.mu.Unlock() - // In case gRPC calls close twice. TODO: remove the checking - // when we are sure that gRPC wont call close twice. - if b.closed { - return nil - } - b.closed = true - close(b.notifyCh) - // terminate all waiting Get()s - b.pinAddr = "" - if len(b.upEps) == 0 { - close(b.upc) - } - return nil -} - -func getHost(ep string) string { - url, uerr := url.Parse(ep) - if uerr != nil || !strings.Contains(ep, "://") { - return ep - } - return url.Host -} diff --git a/vendor/github.com/coreos/etcd/clientv3/client.go b/vendor/github.com/coreos/etcd/clientv3/client.go index 8263890bdff0..2bdd928771f1 100644 --- a/vendor/github.com/coreos/etcd/clientv3/client.go +++ b/vendor/github.com/coreos/etcd/clientv3/client.go @@ -20,22 +20,25 @@ import ( "fmt" "net" "net/url" + "strconv" "strings" "sync" "time" "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" - prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" "golang.org/x/net/context" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" ) var ( ErrNoAvailableEndpoints = errors.New("etcdclient: no available endpoints") + ErrOldCluster = errors.New("etcdclient: old cluster version") ) // Client provides and manages an etcd v3 client session. @@ -47,22 +50,25 @@ type Client struct { Auth Maintenance - conn *grpc.ClientConn - cfg Config - creds *credentials.TransportCredentials - balancer *simpleBalancer - retryWrapper retryRpcFunc - retryAuthWrapper retryRpcFunc + conn *grpc.ClientConn + dialerrc chan error + + cfg Config + creds *credentials.TransportCredentials + balancer *healthBalancer + mu *sync.Mutex ctx context.Context cancel context.CancelFunc - // Username is a username for authentication + // Username is a user name for authentication. Username string - // Password is a password for authentication + // Password is a password for authentication. Password string // tokenCred is an instance of WithPerRPCCredentials()'s argument tokenCred *authTokenCredential + + callOpts []grpc.CallOption } // New creates a new etcdv3 client from a given configuration. @@ -74,26 +80,28 @@ func New(cfg Config) (*Client, error) { return newClient(&cfg) } +// NewCtxClient creates a client with a context but no underlying grpc +// connection. This is useful for embedded cases that override the +// service interface implementations and do not need connection management. 
+func NewCtxClient(ctx context.Context) *Client { + cctx, cancel := context.WithCancel(ctx) + return &Client{ctx: cctx, cancel: cancel} +} + // NewFromURL creates a new etcdv3 client from a URL. func NewFromURL(url string) (*Client, error) { return New(Config{Endpoints: []string{url}}) } -// NewFromConfigFile creates a new etcdv3 client from a configuration file. -func NewFromConfigFile(path string) (*Client, error) { - cfg, err := configFromFile(path) - if err != nil { - return nil, err - } - return New(*cfg) -} - // Close shuts down the client's etcd connections. func (c *Client) Close() error { c.cancel() c.Watcher.Close() c.Lease.Close() - return toErr(c.ctx, c.conn.Close()) + if c.conn != nil { + return toErr(c.ctx, c.conn.Close()) + } + return c.ctx.Err() } // Ctx is a context for "out of band" messages (e.g., for sending @@ -111,8 +119,23 @@ func (c *Client) Endpoints() (eps []string) { // SetEndpoints updates client's endpoints. func (c *Client) SetEndpoints(eps ...string) { + c.mu.Lock() c.cfg.Endpoints = eps - c.balancer.updateAddrs(eps) + c.mu.Unlock() + c.balancer.updateAddrs(eps...) + + // updating notifyCh can trigger new connections, + // need update addrs if all connections are down + // or addrs does not include pinAddr. + c.balancer.mu.RLock() + update := !hasAddr(c.balancer.addrs, c.balancer.pinAddr) + c.balancer.mu.RUnlock() + if update { + select { + case c.balancer.updateAddrsC <- notifyNext: + case <-c.balancer.stopc: + } + } } // Sync synchronizes client's endpoints with the known endpoints from the etcd membership. @@ -139,8 +162,10 @@ func (c *Client) autoSync() { case <-c.ctx.Done(): return case <-time.After(c.cfg.AutoSyncInterval): - ctx, _ := context.WithTimeout(c.ctx, 5*time.Second) - if err := c.Sync(ctx); err != nil && err != c.ctx.Err() { + ctx, cancel := context.WithTimeout(c.ctx, 5*time.Second) + err := c.Sync(ctx) + cancel() + if err != nil && err != c.ctx.Err() { logger.Println("Auto sync endpoints failed:", err) } } @@ -169,7 +194,7 @@ func parseEndpoint(endpoint string) (proto string, host string, scheme string) { host = endpoint url, uerr := url.Parse(endpoint) if uerr != nil || !strings.Contains(endpoint, "://") { - return + return proto, host, scheme } scheme = url.Scheme @@ -177,12 +202,13 @@ func parseEndpoint(endpoint string) (proto string, host string, scheme string) { host = url.Host switch url.Scheme { case "http", "https": - case "unix": + case "unix", "unixs": proto = "unix" + host = url.Host + url.Path default: proto, host = "", "" } - return + return proto, host, scheme } func (c *Client) processCreds(scheme string) (creds *credentials.TransportCredentials) { @@ -191,7 +217,7 @@ func (c *Client) processCreds(scheme string) (creds *credentials.TransportCreden case "unix": case "http": creds = nil - case "https": + case "https", "unixs": if creds != nil { break } @@ -201,7 +227,7 @@ func (c *Client) processCreds(scheme string) (creds *credentials.TransportCreden default: creds = nil } - return + return creds } // dialSetupOpts gives the dial opts prior to any authentication @@ -209,10 +235,22 @@ func (c *Client) dialSetupOpts(endpoint string, dopts ...grpc.DialOption) (opts if c.cfg.DialTimeout > 0 { opts = []grpc.DialOption{grpc.WithTimeout(c.cfg.DialTimeout)} } + if c.cfg.DialKeepAliveTime > 0 { + params := keepalive.ClientParameters{ + Time: c.cfg.DialKeepAliveTime, + Timeout: c.cfg.DialKeepAliveTimeout, + } + opts = append(opts, grpc.WithKeepaliveParams(params)) + } opts = append(opts, dopts...) 
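+	// The dialer closure below resolves the balancer's host:port back to
+	// the original endpoint (recovering unix-socket schemes), and forwards
+	// dial errors to dialerrc so a failed bootstrap can report the root
+	// cause rather than a bare timeout.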
f := func(host string, t time.Duration) (net.Conn, error) { - proto, host, _ := parseEndpoint(c.balancer.getEndpoint(host)) + proto, host, _ := parseEndpoint(c.balancer.endpoint(host)) + if host == "" && endpoint != "" { + // dialing an endpoint not in the balancer; use + // endpoint passed into dial + proto, host, _ = parseEndpoint(endpoint) + } if proto == "" { return nil, fmt.Errorf("unknown scheme for %q", host) } @@ -222,7 +260,14 @@ func (c *Client) dialSetupOpts(endpoint string, dopts ...grpc.DialOption) (opts default: } dialer := &net.Dialer{Timeout: t} - return dialer.DialContext(c.ctx, proto, host) + conn, err := dialer.DialContext(c.ctx, proto, host) + if err != nil { + select { + case c.dialerrc <- err: + default: + } + } + return conn, err } opts = append(opts, grpc.WithDialer(f)) @@ -252,7 +297,7 @@ func (c *Client) getToken(ctx context.Context) error { endpoint := c.cfg.Endpoints[i] host := getHost(endpoint) // use dial options without dopts to avoid reusing the client balancer - auth, err = newAuthenticator(host, c.dialSetupOpts(endpoint)) + auth, err = newAuthenticator(host, c.dialSetupOpts(endpoint), c) if err != nil { continue } @@ -288,21 +333,23 @@ func (c *Client) dial(endpoint string, dopts ...grpc.DialOption) (*grpc.ClientCo defer cancel() ctx = cctx } - if err := c.getToken(ctx); err != nil { - if err == ctx.Err() && ctx.Err() != c.ctx.Err() { - err = grpc.ErrClientConnTimeout + + err := c.getToken(ctx) + if err != nil { + if toErr(ctx, err) != rpctypes.ErrAuthNotEnabled { + if err == ctx.Err() && ctx.Err() != c.ctx.Err() { + err = context.DeadlineExceeded + } + return nil, err } - return nil, err + } else { + opts = append(opts, grpc.WithPerRPCCredentials(c.tokenCred)) } - - opts = append(opts, grpc.WithPerRPCCredentials(c.tokenCred)) } - // add metrics options - opts = append(opts, grpc.WithUnaryInterceptor(prometheus.UnaryClientInterceptor)) - opts = append(opts, grpc.WithStreamInterceptor(prometheus.StreamClientInterceptor)) + opts = append(opts, c.cfg.DialOptions...) - conn, err := grpc.Dial(host, opts...) + conn, err := grpc.DialContext(c.ctx, host, opts...) if err != nil { return nil, err } @@ -313,7 +360,7 @@ func (c *Client) dial(endpoint string, dopts ...grpc.DialOption) (*grpc.ClientCo // when the cluster has a leader. 
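+// For example, a watch that should fail over when the connected member
+// is partitioned from the leader (the key name is illustrative):
+//
+//	ctx := clientv3.WithRequireLeader(context.Background())
+//	wch := cli.Watch(ctx, "key")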
func WithRequireLeader(ctx context.Context) context.Context { md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader) - return metadata.NewContext(ctx, md) + return metadata.NewOutgoingContext(ctx, md) } func newClient(cfg *Config) (*Client, error) { @@ -327,20 +374,50 @@ func newClient(cfg *Config) (*Client, error) { } // use a temporary skeleton client to bootstrap first connection - ctx, cancel := context.WithCancel(context.TODO()) + baseCtx := context.TODO() + if cfg.Context != nil { + baseCtx = cfg.Context + } + + ctx, cancel := context.WithCancel(baseCtx) client := &Client{ - conn: nil, - cfg: *cfg, - creds: creds, - ctx: ctx, - cancel: cancel, + conn: nil, + dialerrc: make(chan error, 1), + cfg: *cfg, + creds: creds, + ctx: ctx, + cancel: cancel, + mu: new(sync.Mutex), + callOpts: defaultCallOpts, } if cfg.Username != "" && cfg.Password != "" { client.Username = cfg.Username client.Password = cfg.Password } + if cfg.MaxCallSendMsgSize > 0 || cfg.MaxCallRecvMsgSize > 0 { + if cfg.MaxCallRecvMsgSize > 0 && cfg.MaxCallSendMsgSize > cfg.MaxCallRecvMsgSize { + return nil, fmt.Errorf("gRPC message recv limit (%d bytes) must be greater than send limit (%d bytes)", cfg.MaxCallRecvMsgSize, cfg.MaxCallSendMsgSize) + } + callOpts := []grpc.CallOption{ + defaultFailFast, + defaultMaxCallSendMsgSize, + defaultMaxCallRecvMsgSize, + } + if cfg.MaxCallSendMsgSize > 0 { + callOpts[1] = grpc.MaxCallSendMsgSize(cfg.MaxCallSendMsgSize) + } + if cfg.MaxCallRecvMsgSize > 0 { + callOpts[2] = grpc.MaxCallRecvMsgSize(cfg.MaxCallRecvMsgSize) + } + client.callOpts = callOpts + } - client.balancer = newSimpleBalancer(cfg.Endpoints) + client.balancer = newHealthBalancer(cfg.Endpoints, cfg.DialTimeout, func(ep string) (bool, error) { + return grpcHealthCheck(client, ep) + }) + + // use Endpoints[0] so that for https:// without any tls config given, then + // grpc will assume the certificate server name is the endpoint host. 
conn, err := client.dial(cfg.Endpoints[0], grpc.WithBalancer(client.balancer)) if err != nil { client.cancel() @@ -348,24 +425,27 @@ func newClient(cfg *Config) (*Client, error) { return nil, err } client.conn = conn - client.retryWrapper = client.newRetryWrapper() - client.retryAuthWrapper = client.newAuthRetryWrapper() // wait for a connection if cfg.DialTimeout > 0 { hasConn := false waitc := time.After(cfg.DialTimeout) select { - case <-client.balancer.readyc: + case <-client.balancer.ready(): hasConn = true case <-ctx.Done(): case <-waitc: } if !hasConn { + err := context.DeadlineExceeded + select { + case err = <-client.dialerrc: + default: + } client.cancel() client.balancer.Close() conn.Close() - return nil, grpc.ErrClientConnTimeout + return nil, err } } @@ -376,10 +456,57 @@ func newClient(cfg *Config) (*Client, error) { client.Auth = NewAuth(client) client.Maintenance = NewMaintenance(client) + if cfg.RejectOldCluster { + if err := client.checkVersion(); err != nil { + client.Close() + return nil, err + } + } + go client.autoSync() return client, nil } +func (c *Client) checkVersion() (err error) { + var wg sync.WaitGroup + errc := make(chan error, len(c.cfg.Endpoints)) + ctx, cancel := context.WithCancel(c.ctx) + if c.cfg.DialTimeout > 0 { + ctx, cancel = context.WithTimeout(ctx, c.cfg.DialTimeout) + } + wg.Add(len(c.cfg.Endpoints)) + for _, ep := range c.cfg.Endpoints { + // if cluster is current, any endpoint gives a recent version + go func(e string) { + defer wg.Done() + resp, rerr := c.Status(ctx, e) + if rerr != nil { + errc <- rerr + return + } + vs := strings.Split(resp.Version, ".") + maj, min := 0, 0 + if len(vs) >= 2 { + maj, _ = strconv.Atoi(vs[0]) + min, rerr = strconv.Atoi(vs[1]) + } + if maj < 3 || (maj == 3 && min < 2) { + rerr = ErrOldCluster + } + errc <- rerr + }(ep) + } + // wait for success + for i := 0; i < len(c.cfg.Endpoints); i++ { + if err = <-errc; err == nil { + break + } + } + cancel() + wg.Wait() + return err +} + // ActiveConnection returns the current in-use connection func (c *Client) ActiveConnection() *grpc.ClientConn { return c.conn } @@ -392,14 +519,14 @@ func isHaltErr(ctx context.Context, err error) bool { if err == nil { return false } - code := grpc.Code(err) + ev, _ := status.FromError(err) // Unavailable codes mean the system will be right back. // (e.g., can't connect, lost leader) // Treat Internal codes as if something failed, leaving the // system in an inconsistent state, but retrying could make progress. // (e.g., failed in middle of send, corrupted frame) // TODO: are permanent Internal errors possible from grpc? 
- return code != codes.Unavailable && code != codes.Internal + return ev.Code() != codes.Unavailable && ev.Code() != codes.Internal } func toErr(ctx context.Context, err error) error { @@ -410,7 +537,8 @@ func toErr(ctx context.Context, err error) error { if _, ok := err.(rpctypes.EtcdError); ok { return err } - code := grpc.Code(err) + ev, _ := status.FromError(err) + code := ev.Code() switch code { case codes.DeadlineExceeded: fallthrough @@ -419,9 +547,16 @@ func toErr(ctx context.Context, err error) error { err = ctx.Err() } case codes.Unavailable: - err = ErrNoAvailableEndpoints case codes.FailedPrecondition: err = grpc.ErrClientConnClosing } return err } + +func canceledByCaller(stopCtx context.Context, err error) bool { + if stopCtx.Err() == nil || err == nil { + return false + } + + return err == context.Canceled || err == context.DeadlineExceeded +} diff --git a/vendor/github.com/coreos/etcd/clientv3/cluster.go b/vendor/github.com/coreos/etcd/clientv3/cluster.go index b9bff626bd7b..545d676e7bb3 100644 --- a/vendor/github.com/coreos/etcd/clientv3/cluster.go +++ b/vendor/github.com/coreos/etcd/clientv3/cluster.go @@ -16,6 +16,7 @@ package clientv3 import ( pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "golang.org/x/net/context" "google.golang.org/grpc" ) @@ -43,60 +44,59 @@ type Cluster interface { } type cluster struct { - remote pb.ClusterClient + remote pb.ClusterClient + callOpts []grpc.CallOption } func NewCluster(c *Client) Cluster { - return &cluster{remote: RetryClusterClient(c)} + api := &cluster{remote: RetryClusterClient(c)} + if c != nil { + api.callOpts = c.callOpts + } + return api +} + +func NewClusterFromClusterClient(remote pb.ClusterClient, c *Client) Cluster { + api := &cluster{remote: remote} + if c != nil { + api.callOpts = c.callOpts + } + return api } func (c *cluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) { r := &pb.MemberAddRequest{PeerURLs: peerAddrs} - resp, err := c.remote.MemberAdd(ctx, r) - if err == nil { - return (*MemberAddResponse)(resp), nil - } - if isHaltErr(ctx, err) { + resp, err := c.remote.MemberAdd(ctx, r, c.callOpts...) + if err != nil { return nil, toErr(ctx, err) } - return nil, toErr(ctx, err) + return (*MemberAddResponse)(resp), nil } func (c *cluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) { r := &pb.MemberRemoveRequest{ID: id} - resp, err := c.remote.MemberRemove(ctx, r) - if err == nil { - return (*MemberRemoveResponse)(resp), nil - } - if isHaltErr(ctx, err) { + resp, err := c.remote.MemberRemove(ctx, r, c.callOpts...) + if err != nil { return nil, toErr(ctx, err) } - return nil, toErr(ctx, err) + return (*MemberRemoveResponse)(resp), nil } func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) { // it is safe to retry on update. - for { - r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs} - resp, err := c.remote.MemberUpdate(ctx, r, grpc.FailFast(false)) - if err == nil { - return (*MemberUpdateResponse)(resp), nil - } - if isHaltErr(ctx, err) { - return nil, toErr(ctx, err) - } + r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs} + resp, err := c.remote.MemberUpdate(ctx, r, c.callOpts...) + if err == nil { + return (*MemberUpdateResponse)(resp), nil } + return nil, toErr(ctx, err) } func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) { // it is safe to retry on list. 
- for { - resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{}, grpc.FailFast(false)) - if err == nil { - return (*MemberListResponse)(resp), nil - } - if isHaltErr(ctx, err) { - return nil, toErr(ctx, err) - } + resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{}, c.callOpts...) + if err == nil { + return (*MemberListResponse)(resp), nil } + return nil, toErr(ctx, err) } diff --git a/vendor/github.com/coreos/etcd/clientv3/compact_op.go b/vendor/github.com/coreos/etcd/clientv3/compact_op.go index 32d97eb0cc1c..41e80c1da5d4 100644 --- a/vendor/github.com/coreos/etcd/clientv3/compact_op.go +++ b/vendor/github.com/coreos/etcd/clientv3/compact_op.go @@ -44,10 +44,8 @@ func (op CompactOp) toRequest() *pb.CompactionRequest { return &pb.CompactionRequest{Revision: op.revision, Physical: op.physical} } -// WithCompactPhysical makes compact RPC call wait until -// the compaction is physically applied to the local database -// such that compacted entries are totally removed from the -// backend database. +// WithCompactPhysical makes Compact wait until all compacted entries are +// removed from the etcd server's storage. func WithCompactPhysical() CompactOption { return func(op *CompactOp) { op.physical = true } } diff --git a/vendor/github.com/coreos/etcd/clientv3/compare.go b/vendor/github.com/coreos/etcd/clientv3/compare.go index f89ffb52c4ad..68a25fd800fb 100644 --- a/vendor/github.com/coreos/etcd/clientv3/compare.go +++ b/vendor/github.com/coreos/etcd/clientv3/compare.go @@ -82,6 +82,24 @@ func ModRevision(key string) Cmp { return Cmp{Key: []byte(key), Target: pb.Compare_MOD} } +// KeyBytes returns the byte slice holding with the comparison key. +func (cmp *Cmp) KeyBytes() []byte { return cmp.Key } + +// WithKeyBytes sets the byte slice for the comparison key. +func (cmp *Cmp) WithKeyBytes(key []byte) { cmp.Key = key } + +// ValueBytes returns the byte slice holding the comparison value, if any. +func (cmp *Cmp) ValueBytes() []byte { + if tu, ok := cmp.TargetUnion.(*pb.Compare_Value); ok { + return tu.Value + } + return nil +} + +// WithValueBytes sets the byte slice for the comparison's value. +func (cmp *Cmp) WithValueBytes(v []byte) { cmp.TargetUnion.(*pb.Compare_Value).Value = v } + +// mustInt64 panics if val isn't an int or int64. It returns an int64 otherwise. func mustInt64(val interface{}) int64 { if v, ok := val.(int64); ok { return v @@ -91,3 +109,12 @@ func mustInt64(val interface{}) int64 { } panic("bad value") } + +// mustInt64orLeaseID panics if val isn't a LeaseID, int or int64. It returns an +// int64 otherwise. +func mustInt64orLeaseID(val interface{}) int64 { + if v, ok := val.(LeaseID); ok { + return int64(v) + } + return mustInt64(val) +} diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/doc.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/doc.go new file mode 100644 index 000000000000..dcdbf511d1b1 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/concurrency/doc.go @@ -0,0 +1,17 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Package concurrency implements concurrency operations on top of +// etcd such as distributed locks, barriers, and elections. +package concurrency diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/election.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/election.go new file mode 100644 index 000000000000..c092bde0aeb6 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/concurrency/election.go @@ -0,0 +1,246 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package concurrency + +import ( + "errors" + "fmt" + + v3 "github.com/coreos/etcd/clientv3" + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "github.com/coreos/etcd/mvcc/mvccpb" + + "golang.org/x/net/context" +) + +var ( + ErrElectionNotLeader = errors.New("election: not leader") + ErrElectionNoLeader = errors.New("election: no leader") +) + +type Election struct { + session *Session + + keyPrefix string + + leaderKey string + leaderRev int64 + leaderSession *Session + hdr *pb.ResponseHeader +} + +// NewElection returns a new election on a given key prefix. +func NewElection(s *Session, pfx string) *Election { + return &Election{session: s, keyPrefix: pfx + "/"} +} + +// ResumeElection initializes an election with a known leader. +func ResumeElection(s *Session, pfx string, leaderKey string, leaderRev int64) *Election { + return &Election{ + session: s, + leaderKey: leaderKey, + leaderRev: leaderRev, + leaderSession: s, + } +} + +// Campaign puts a value as eligible for the election. It blocks until +// it is elected, an error occurs, or the context is cancelled. +func (e *Election) Campaign(ctx context.Context, val string) error { + s := e.session + client := e.session.Client() + + k := fmt.Sprintf("%s%x", e.keyPrefix, s.Lease()) + txn := client.Txn(ctx).If(v3.Compare(v3.CreateRevision(k), "=", 0)) + txn = txn.Then(v3.OpPut(k, val, v3.WithLease(s.Lease()))) + txn = txn.Else(v3.OpGet(k)) + resp, err := txn.Commit() + if err != nil { + return err + } + e.leaderKey, e.leaderRev, e.leaderSession = k, resp.Header.Revision, s + if !resp.Succeeded { + kv := resp.Responses[0].GetResponseRange().Kvs[0] + e.leaderRev = kv.CreateRevision + if string(kv.Value) != val { + if err = e.Proclaim(ctx, val); err != nil { + e.Resign(ctx) + return err + } + } + } + + _, err = waitDeletes(ctx, client, e.keyPrefix, e.leaderRev-1) + if err != nil { + // clean up in case of context cancel + select { + case <-ctx.Done(): + e.Resign(client.Ctx()) + default: + e.leaderSession = nil + } + return err + } + e.hdr = resp.Header + + return nil +} + +// Proclaim lets the leader announce a new value without another election. 
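+//
+// A short sketch (the prefix and values are illustrative):
+//
+//	e := concurrency.NewElection(s, "/leader-election")
+//	if err := e.Campaign(ctx, "v1"); err == nil {
+//		_ = e.Proclaim(ctx, "v2") // refresh the value while still leader
+//	}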
+func (e *Election) Proclaim(ctx context.Context, val string) error { + if e.leaderSession == nil { + return ErrElectionNotLeader + } + client := e.session.Client() + cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev) + txn := client.Txn(ctx).If(cmp) + txn = txn.Then(v3.OpPut(e.leaderKey, val, v3.WithLease(e.leaderSession.Lease()))) + tresp, terr := txn.Commit() + if terr != nil { + return terr + } + if !tresp.Succeeded { + e.leaderKey = "" + return ErrElectionNotLeader + } + + e.hdr = tresp.Header + return nil +} + +// Resign lets a leader start a new election. +func (e *Election) Resign(ctx context.Context) (err error) { + if e.leaderSession == nil { + return nil + } + client := e.session.Client() + cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev) + resp, err := client.Txn(ctx).If(cmp).Then(v3.OpDelete(e.leaderKey)).Commit() + if err == nil { + e.hdr = resp.Header + } + e.leaderKey = "" + e.leaderSession = nil + return err +} + +// Leader returns the leader value for the current election. +func (e *Election) Leader(ctx context.Context) (*v3.GetResponse, error) { + client := e.session.Client() + resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...) + if err != nil { + return nil, err + } else if len(resp.Kvs) == 0 { + // no leader currently elected + return nil, ErrElectionNoLeader + } + return resp, nil +} + +// Observe returns a channel that reliably observes ordered leader proposals +// as GetResponse values on every current elected leader key. It will not +// necessarily fetch all historical leader updates, but will always post the +// most recent leader value. +// +// The channel closes when the context is canceled or the underlying watcher +// is otherwise disrupted. +func (e *Election) Observe(ctx context.Context) <-chan v3.GetResponse { + retc := make(chan v3.GetResponse) + go e.observe(ctx, retc) + return retc +} + +func (e *Election) observe(ctx context.Context, ch chan<- v3.GetResponse) { + client := e.session.Client() + + defer close(ch) + for { + resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...) + if err != nil { + return + } + + var kv *mvccpb.KeyValue + var hdr *pb.ResponseHeader + + if len(resp.Kvs) == 0 { + cctx, cancel := context.WithCancel(ctx) + // wait for first key put on prefix + opts := []v3.OpOption{v3.WithRev(resp.Header.Revision), v3.WithPrefix()} + wch := client.Watch(cctx, e.keyPrefix, opts...) + for kv == nil { + wr, ok := <-wch + if !ok || wr.Err() != nil { + cancel() + return + } + // only accept puts; a delete will make observe() spin + for _, ev := range wr.Events { + if ev.Type == mvccpb.PUT { + hdr, kv = &wr.Header, ev.Kv + // may have multiple revs; hdr.rev = the last rev + // set to kv's rev in case batch has multiple Puts + hdr.Revision = kv.ModRevision + break + } + } + } + cancel() + } else { + hdr, kv = resp.Header, resp.Kvs[0] + } + + select { + case ch <- v3.GetResponse{Header: hdr, Kvs: []*mvccpb.KeyValue{kv}}: + case <-ctx.Done(): + return + } + + cctx, cancel := context.WithCancel(ctx) + wch := client.Watch(cctx, string(kv.Key), v3.WithRev(hdr.Revision+1)) + keyDeleted := false + for !keyDeleted { + wr, ok := <-wch + if !ok { + cancel() + return + } + for _, ev := range wr.Events { + if ev.Type == mvccpb.DELETE { + keyDeleted = true + break + } + resp.Header = &wr.Header + resp.Kvs = []*mvccpb.KeyValue{ev.Kv} + select { + case ch <- *resp: + case <-cctx.Done(): + cancel() + return + } + } + } + cancel() + } +} + +// Key returns the leader key if elected, empty string otherwise. 
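+//
+// Together with Rev, the key can serve as a fencing token for
+// leader-only writes; a hypothetical guard:
+//
+//	cmp := v3.Compare(v3.CreateRevision(e.Key()), "=", e.Rev())
+//	_, err := cli.Txn(ctx).If(cmp).Then(v3.OpPut("job", "spec")).Commit()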
+func (e *Election) Key() string { return e.leaderKey } + +// Rev returns the leader key's creation revision, if elected. +func (e *Election) Rev() int64 { return e.leaderRev } + +// Header is the response header from the last successful election proposal. +func (e *Election) Header() *pb.ResponseHeader { return e.hdr } diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/key.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/key.go new file mode 100644 index 000000000000..9936737756ca --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/concurrency/key.go @@ -0,0 +1,66 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package concurrency + +import ( + "fmt" + + v3 "github.com/coreos/etcd/clientv3" + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "github.com/coreos/etcd/mvcc/mvccpb" + + "golang.org/x/net/context" +) + +func waitDelete(ctx context.Context, client *v3.Client, key string, rev int64) error { + cctx, cancel := context.WithCancel(ctx) + defer cancel() + + var wr v3.WatchResponse + wch := client.Watch(cctx, key, v3.WithRev(rev)) + for wr = range wch { + for _, ev := range wr.Events { + if ev.Type == mvccpb.DELETE { + return nil + } + } + } + if err := wr.Err(); err != nil { + return err + } + if err := ctx.Err(); err != nil { + return err + } + return fmt.Errorf("lost watcher waiting for delete") +} + +// waitDeletes efficiently waits until all keys matching the prefix and no greater +// than the create revision. +func waitDeletes(ctx context.Context, client *v3.Client, pfx string, maxCreateRev int64) (*pb.ResponseHeader, error) { + getOpts := append(v3.WithLastCreate(), v3.WithMaxCreateRev(maxCreateRev)) + for { + resp, err := client.Get(ctx, pfx, getOpts...) + if err != nil { + return nil, err + } + if len(resp.Kvs) == 0 { + return resp.Header, nil + } + lastKey := string(resp.Kvs[0].Key) + if err = waitDelete(ctx, client, lastKey, resp.Header.Revision); err != nil { + return nil, err + } + } +} diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/mutex.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/mutex.go new file mode 100644 index 000000000000..736a9d3d353f --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/concurrency/mutex.go @@ -0,0 +1,119 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
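+// See the License for the specific language governing permissions and
+// limitations under the License.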
+ +package concurrency + +import ( + "fmt" + "sync" + + v3 "github.com/coreos/etcd/clientv3" + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + + "golang.org/x/net/context" +) + +// Mutex implements the sync Locker interface with etcd +type Mutex struct { + s *Session + + pfx string + myKey string + myRev int64 + hdr *pb.ResponseHeader +} + +func NewMutex(s *Session, pfx string) *Mutex { + return &Mutex{s, pfx + "/", "", -1, nil} +} + +// Lock locks the mutex with a cancelable context. If the context is canceled +// while trying to acquire the lock, the mutex tries to clean its stale lock entry. +func (m *Mutex) Lock(ctx context.Context) error { + s := m.s + client := m.s.Client() + + m.myKey = fmt.Sprintf("%s%x", m.pfx, s.Lease()) + cmp := v3.Compare(v3.CreateRevision(m.myKey), "=", 0) + // put self in lock waiters via myKey; oldest waiter holds lock + put := v3.OpPut(m.myKey, "", v3.WithLease(s.Lease())) + // reuse key in case this session already holds the lock + get := v3.OpGet(m.myKey) + // fetch current holder to complete uncontended path with only one RPC + getOwner := v3.OpGet(m.pfx, v3.WithFirstCreate()...) + resp, err := client.Txn(ctx).If(cmp).Then(put, getOwner).Else(get, getOwner).Commit() + if err != nil { + return err + } + m.myRev = resp.Header.Revision + if !resp.Succeeded { + m.myRev = resp.Responses[0].GetResponseRange().Kvs[0].CreateRevision + } + // if no key on prefix / the minimum rev is key, already hold the lock + ownerKey := resp.Responses[1].GetResponseRange().Kvs + if len(ownerKey) == 0 || ownerKey[0].CreateRevision == m.myRev { + m.hdr = resp.Header + return nil + } + + // wait for deletion revisions prior to myKey + hdr, werr := waitDeletes(ctx, client, m.pfx, m.myRev-1) + // release lock key if cancelled + select { + case <-ctx.Done(): + m.Unlock(client.Ctx()) + default: + m.hdr = hdr + } + return werr +} + +func (m *Mutex) Unlock(ctx context.Context) error { + client := m.s.Client() + if _, err := client.Delete(ctx, m.myKey); err != nil { + return err + } + m.myKey = "\x00" + m.myRev = -1 + return nil +} + +func (m *Mutex) IsOwner() v3.Cmp { + return v3.Compare(v3.CreateRevision(m.myKey), "=", m.myRev) +} + +func (m *Mutex) Key() string { return m.myKey } + +// Header is the response header received from etcd on acquiring the lock. +func (m *Mutex) Header() *pb.ResponseHeader { return m.hdr } + +type lockerMutex struct{ *Mutex } + +func (lm *lockerMutex) Lock() { + client := lm.s.Client() + if err := lm.Mutex.Lock(client.Ctx()); err != nil { + panic(err) + } +} +func (lm *lockerMutex) Unlock() { + client := lm.s.Client() + if err := lm.Mutex.Unlock(client.Ctx()); err != nil { + panic(err) + } +} + +// NewLocker creates a sync.Locker backed by an etcd mutex. +func NewLocker(s *Session, pfx string) sync.Locker { + return &lockerMutex{NewMutex(s, pfx)} +} diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/session.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/session.go new file mode 100644 index 000000000000..55cb553ea4ad --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/concurrency/session.go @@ -0,0 +1,142 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package concurrency + +import ( + "time" + + v3 "github.com/coreos/etcd/clientv3" + + "golang.org/x/net/context" +) + +const defaultSessionTTL = 60 + +// Session represents a lease kept alive for the lifetime of a client. +// Fault-tolerant applications may use sessions to reason about liveness. +type Session struct { + client *v3.Client + opts *sessionOptions + id v3.LeaseID + + cancel context.CancelFunc + donec <-chan struct{} +} + +// NewSession gets the leased session for a client. +func NewSession(client *v3.Client, opts ...SessionOption) (*Session, error) { + ops := &sessionOptions{ttl: defaultSessionTTL, ctx: client.Ctx()} + for _, opt := range opts { + opt(ops) + } + + id := ops.leaseID + if id == v3.NoLease { + resp, err := client.Grant(ops.ctx, int64(ops.ttl)) + if err != nil { + return nil, err + } + id = v3.LeaseID(resp.ID) + } + + ctx, cancel := context.WithCancel(ops.ctx) + keepAlive, err := client.KeepAlive(ctx, id) + if err != nil || keepAlive == nil { + cancel() + return nil, err + } + + donec := make(chan struct{}) + s := &Session{client: client, opts: ops, id: id, cancel: cancel, donec: donec} + + // keep the lease alive until client error or cancelled context + go func() { + defer close(donec) + for range keepAlive { + // eat messages until keep alive channel closes + } + }() + + return s, nil +} + +// Client is the etcd client that is attached to the session. +func (s *Session) Client() *v3.Client { + return s.client +} + +// Lease is the lease ID for keys bound to the session. +func (s *Session) Lease() v3.LeaseID { return s.id } + +// Done returns a channel that closes when the lease is orphaned, expires, or +// is otherwise no longer being refreshed. +func (s *Session) Done() <-chan struct{} { return s.donec } + +// Orphan ends the refresh for the session lease. This is useful +// in case the state of the client connection is indeterminate (revoke +// would fail) or when transferring lease ownership. +func (s *Session) Orphan() { + s.cancel() + <-s.donec +} + +// Close orphans the session and revokes the session lease. +func (s *Session) Close() error { + s.Orphan() + // if revoke takes longer than the ttl, lease is expired anyway + ctx, cancel := context.WithTimeout(s.opts.ctx, time.Duration(s.opts.ttl)*time.Second) + _, err := s.client.Revoke(ctx, s.id) + cancel() + return err +} + +type sessionOptions struct { + ttl int + leaseID v3.LeaseID + ctx context.Context +} + +// SessionOption configures Session. +type SessionOption func(*sessionOptions) + +// WithTTL configures the session's TTL in seconds. +// If TTL is <= 0, the default 60 seconds TTL will be used. +func WithTTL(ttl int) SessionOption { + return func(so *sessionOptions) { + if ttl > 0 { + so.ttl = ttl + } + } +} + +// WithLease specifies the existing leaseID to be used for the session. +// This is useful in process restart scenario, for example, to reclaim +// leadership from an election prior to restart. 
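+//
+// A sketch of reclaiming a persisted lease after restart (savedID is
+// assumed to have been stored by the previous process):
+//
+//	s, err := concurrency.NewSession(cli, concurrency.WithLease(savedID))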
+func WithLease(leaseID v3.LeaseID) SessionOption { + return func(so *sessionOptions) { + so.leaseID = leaseID + } +} + +// WithContext assigns a context to the session instead of defaulting to +// using the client context. This is useful for canceling NewSession and +// Close operations immediately without having to close the client. If the +// context is canceled before Close() completes, the session's lease will be +// abandoned and left to expire instead of being revoked. +func WithContext(ctx context.Context) SessionOption { + return func(so *sessionOptions) { + so.ctx = ctx + } +} diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/stm.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/stm.go new file mode 100644 index 000000000000..6bfd70ec4286 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/concurrency/stm.go @@ -0,0 +1,388 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package concurrency + +import ( + "math" + + v3 "github.com/coreos/etcd/clientv3" + + "golang.org/x/net/context" +) + +// STM is an interface for software transactional memory. +type STM interface { + // Get returns the value for a key and inserts the key in the txn's read set. + // If Get fails, it aborts the transaction with an error, never returning. + Get(key ...string) string + // Put adds a value for a key to the write set. + Put(key, val string, opts ...v3.OpOption) + // Rev returns the revision of a key in the read set. + Rev(key string) int64 + // Del deletes a key. + Del(key string) + + // commit attempts to apply the txn's changes to the server. + commit() *v3.TxnResponse + reset() +} + +// Isolation is an enumeration of transactional isolation levels which +// describes how transactions should interfere and conflict. +type Isolation int + +const ( + // SerializableSnapshot provides serializable isolation and also checks + // for write conflicts. + SerializableSnapshot Isolation = iota + // Serializable reads within the same transaction attempt return data + // from the at the revision of the first read. + Serializable + // RepeatableReads reads within the same transaction attempt always + // return the same data. + RepeatableReads + // ReadCommitted reads keys from any committed revision. + ReadCommitted +) + +// stmError safely passes STM errors through panic to the STM error channel. +type stmError struct{ err error } + +type stmOptions struct { + iso Isolation + ctx context.Context + prefetch []string +} + +type stmOption func(*stmOptions) + +// WithIsolation specifies the transaction isolation level. +func WithIsolation(lvl Isolation) stmOption { + return func(so *stmOptions) { so.iso = lvl } +} + +// WithAbortContext specifies the context for permanently aborting the transaction. +func WithAbortContext(ctx context.Context) stmOption { + return func(so *stmOptions) { so.ctx = ctx } +} + +// WithPrefetch is a hint to prefetch a list of keys before trying to apply. 
+// If an STM transaction will unconditionally fetch a set of keys, prefetching +// those keys will save the round-trip cost from requesting each key one by one +// with Get(). +func WithPrefetch(keys ...string) stmOption { + return func(so *stmOptions) { so.prefetch = append(so.prefetch, keys...) } +} + +// NewSTM initiates a new STM instance, using serializable snapshot isolation by default. +func NewSTM(c *v3.Client, apply func(STM) error, so ...stmOption) (*v3.TxnResponse, error) { + opts := &stmOptions{ctx: c.Ctx()} + for _, f := range so { + f(opts) + } + if len(opts.prefetch) != 0 { + f := apply + apply = func(s STM) error { + s.Get(opts.prefetch...) + return f(s) + } + } + return runSTM(mkSTM(c, opts), apply) +} + +func mkSTM(c *v3.Client, opts *stmOptions) STM { + switch opts.iso { + case SerializableSnapshot: + s := &stmSerializable{ + stm: stm{client: c, ctx: opts.ctx}, + prefetch: make(map[string]*v3.GetResponse), + } + s.conflicts = func() []v3.Cmp { + return append(s.rset.cmps(), s.wset.cmps(s.rset.first()+1)...) + } + return s + case Serializable: + s := &stmSerializable{ + stm: stm{client: c, ctx: opts.ctx}, + prefetch: make(map[string]*v3.GetResponse), + } + s.conflicts = func() []v3.Cmp { return s.rset.cmps() } + return s + case RepeatableReads: + s := &stm{client: c, ctx: opts.ctx, getOpts: []v3.OpOption{v3.WithSerializable()}} + s.conflicts = func() []v3.Cmp { return s.rset.cmps() } + return s + case ReadCommitted: + s := &stm{client: c, ctx: opts.ctx, getOpts: []v3.OpOption{v3.WithSerializable()}} + s.conflicts = func() []v3.Cmp { return nil } + return s + default: + panic("unsupported stm") + } +} + +type stmResponse struct { + resp *v3.TxnResponse + err error +} + +func runSTM(s STM, apply func(STM) error) (*v3.TxnResponse, error) { + outc := make(chan stmResponse, 1) + go func() { + defer func() { + if r := recover(); r != nil { + e, ok := r.(stmError) + if !ok { + // client apply panicked + panic(r) + } + outc <- stmResponse{nil, e.err} + } + }() + var out stmResponse + for { + s.reset() + if out.err = apply(s); out.err != nil { + break + } + if out.resp = s.commit(); out.resp != nil { + break + } + } + outc <- out + }() + r := <-outc + return r.resp, r.err +} + +// stm implements repeatable-read software transactional memory over etcd +type stm struct { + client *v3.Client + ctx context.Context + // rset holds read key values and revisions + rset readSet + // wset holds overwritten keys and their values + wset writeSet + // getOpts are the opts used for gets + getOpts []v3.OpOption + // conflicts computes the current conflicts on the txn + conflicts func() []v3.Cmp +} + +type stmPut struct { + val string + op v3.Op +} + +type readSet map[string]*v3.GetResponse + +func (rs readSet) add(keys []string, txnresp *v3.TxnResponse) { + for i, resp := range txnresp.Responses { + rs[keys[i]] = (*v3.GetResponse)(resp.GetResponseRange()) + } +} + +// first returns the store revision from the first fetch +func (rs readSet) first() int64 { + ret := int64(math.MaxInt64 - 1) + for _, resp := range rs { + if rev := resp.Header.Revision; rev < ret { + ret = rev + } + } + return ret +} + +// cmps guards the txn from updates to read set +func (rs readSet) cmps() []v3.Cmp { + cmps := make([]v3.Cmp, 0, len(rs)) + for k, rk := range rs { + cmps = append(cmps, isKeyCurrent(k, rk)) + } + return cmps +} + +type writeSet map[string]stmPut + +func (ws writeSet) get(keys ...string) *stmPut { + for _, key := range keys { + if wv, ok := ws[key]; ok { + return &wv + } + } + return nil +} + +// 
cmps returns a cmp list testing no writes have happened past rev +func (ws writeSet) cmps(rev int64) []v3.Cmp { + cmps := make([]v3.Cmp, 0, len(ws)) + for key := range ws { + cmps = append(cmps, v3.Compare(v3.ModRevision(key), "<", rev)) + } + return cmps +} + +// puts is the list of ops for all pending writes +func (ws writeSet) puts() []v3.Op { + puts := make([]v3.Op, 0, len(ws)) + for _, v := range ws { + puts = append(puts, v.op) + } + return puts +} + +func (s *stm) Get(keys ...string) string { + if wv := s.wset.get(keys...); wv != nil { + return wv.val + } + return respToValue(s.fetch(keys...)) +} + +func (s *stm) Put(key, val string, opts ...v3.OpOption) { + s.wset[key] = stmPut{val, v3.OpPut(key, val, opts...)} +} + +func (s *stm) Del(key string) { s.wset[key] = stmPut{"", v3.OpDelete(key)} } + +func (s *stm) Rev(key string) int64 { + if resp := s.fetch(key); resp != nil && len(resp.Kvs) != 0 { + return resp.Kvs[0].ModRevision + } + return 0 +} + +func (s *stm) commit() *v3.TxnResponse { + txnresp, err := s.client.Txn(s.ctx).If(s.conflicts()...).Then(s.wset.puts()...).Commit() + if err != nil { + panic(stmError{err}) + } + if txnresp.Succeeded { + return txnresp + } + return nil +} + +func (s *stm) fetch(keys ...string) *v3.GetResponse { + if len(keys) == 0 { + return nil + } + ops := make([]v3.Op, len(keys)) + for i, key := range keys { + if resp, ok := s.rset[key]; ok { + return resp + } + ops[i] = v3.OpGet(key, s.getOpts...) + } + txnresp, err := s.client.Txn(s.ctx).Then(ops...).Commit() + if err != nil { + panic(stmError{err}) + } + s.rset.add(keys, txnresp) + return (*v3.GetResponse)(txnresp.Responses[0].GetResponseRange()) +} + +func (s *stm) reset() { + s.rset = make(map[string]*v3.GetResponse) + s.wset = make(map[string]stmPut) +} + +type stmSerializable struct { + stm + prefetch map[string]*v3.GetResponse +} + +func (s *stmSerializable) Get(keys ...string) string { + if wv := s.wset.get(keys...); wv != nil { + return wv.val + } + firstRead := len(s.rset) == 0 + for _, key := range keys { + if resp, ok := s.prefetch[key]; ok { + delete(s.prefetch, key) + s.rset[key] = resp + } + } + resp := s.stm.fetch(keys...) + if firstRead { + // txn's base revision is defined by the first read + s.getOpts = []v3.OpOption{ + v3.WithRev(resp.Header.Revision), + v3.WithSerializable(), + } + } + return respToValue(resp) +} + +func (s *stmSerializable) Rev(key string) int64 { + s.Get(key) + return s.stm.Rev(key) +} + +func (s *stmSerializable) gets() ([]string, []v3.Op) { + keys := make([]string, 0, len(s.rset)) + ops := make([]v3.Op, 0, len(s.rset)) + for k := range s.rset { + keys = append(keys, k) + ops = append(ops, v3.OpGet(k)) + } + return keys, ops +} + +func (s *stmSerializable) commit() *v3.TxnResponse { + keys, getops := s.gets() + txn := s.client.Txn(s.ctx).If(s.conflicts()...).Then(s.wset.puts()...) 
+ // use Else to prefetch keys in case of conflict to save a round trip + txnresp, err := txn.Else(getops...).Commit() + if err != nil { + panic(stmError{err}) + } + if txnresp.Succeeded { + return txnresp + } + // load prefetch with Else data + s.rset.add(keys, txnresp) + s.prefetch = s.rset + s.getOpts = nil + return nil +} + +func isKeyCurrent(k string, r *v3.GetResponse) v3.Cmp { + if len(r.Kvs) != 0 { + return v3.Compare(v3.ModRevision(k), "=", r.Kvs[0].ModRevision) + } + return v3.Compare(v3.ModRevision(k), "=", 0) +} + +func respToValue(resp *v3.GetResponse) string { + if resp == nil || len(resp.Kvs) == 0 { + return "" + } + return string(resp.Kvs[0].Value) +} + +// NewSTMRepeatable is deprecated. +func NewSTMRepeatable(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) { + return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(RepeatableReads)) +} + +// NewSTMSerializable is deprecated. +func NewSTMSerializable(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) { + return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(Serializable)) +} + +// NewSTMReadCommitted is deprecated. +func NewSTMReadCommitted(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) { + return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(ReadCommitted)) +} diff --git a/vendor/github.com/coreos/etcd/clientv3/config.go b/vendor/github.com/coreos/etcd/clientv3/config.go index d1d5f40906a1..fee12eaf60bb 100644 --- a/vendor/github.com/coreos/etcd/clientv3/config.go +++ b/vendor/github.com/coreos/etcd/clientv3/config.go @@ -16,98 +16,60 @@ package clientv3 import ( "crypto/tls" - "crypto/x509" - "io/ioutil" "time" - "github.com/coreos/etcd/pkg/tlsutil" - "github.com/ghodss/yaml" + "golang.org/x/net/context" + "google.golang.org/grpc" ) type Config struct { - // Endpoints is a list of URLs - Endpoints []string + // Endpoints is a list of URLs. + Endpoints []string `json:"endpoints"` // AutoSyncInterval is the interval to update endpoints with its latest members. // 0 disables auto-sync. By default auto-sync is disabled. - AutoSyncInterval time.Duration + AutoSyncInterval time.Duration `json:"auto-sync-interval"` // DialTimeout is the timeout for failing to establish a connection. - DialTimeout time.Duration + DialTimeout time.Duration `json:"dial-timeout"` + + // DialKeepAliveTime is the time in seconds after which client pings the server to see if + // transport is alive. + DialKeepAliveTime time.Duration `json:"dial-keep-alive-time"` + + // DialKeepAliveTimeout is the time in seconds that the client waits for a response for the + // keep-alive probe. If the response is not received in this time, the connection is closed. + DialKeepAliveTimeout time.Duration `json:"dial-keep-alive-timeout"` + + // MaxCallSendMsgSize is the client-side request send limit in bytes. + // If 0, it defaults to 2.0 MiB (2 * 1024 * 1024). + // Make sure that "MaxCallSendMsgSize" < server-side default send/recv limit. + // ("--max-request-bytes" flag to etcd or "embed.Config.MaxRequestBytes"). + MaxCallSendMsgSize int + + // MaxCallRecvMsgSize is the client-side response receive limit. + // If 0, it defaults to "math.MaxInt32", because range response can + // easily exceed request send limits. + // Make sure that "MaxCallRecvMsgSize" >= server-side default send/recv limit. + // ("--max-request-bytes" flag to etcd or "embed.Config.MaxRequestBytes"). + MaxCallRecvMsgSize int // TLS holds the client secure credentials, if any. 
TLS *tls.Config - // Username is a username for authentication - Username string + // Username is a user name for authentication. + Username string `json:"username"` - // Password is a password for authentication - Password string -} + // Password is a password for authentication. + Password string `json:"password"` -type yamlConfig struct { - Endpoints []string `json:"endpoints"` - AutoSyncInterval time.Duration `json:"auto-sync-interval"` - DialTimeout time.Duration `json:"dial-timeout"` - InsecureTransport bool `json:"insecure-transport"` - InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify"` - Certfile string `json:"cert-file"` - Keyfile string `json:"key-file"` - CAfile string `json:"ca-file"` -} + // RejectOldCluster when set will refuse to create a client against an outdated cluster. + RejectOldCluster bool `json:"reject-old-cluster"` + + // DialOptions is a list of dial options for the grpc client (e.g., for interceptors). + DialOptions []grpc.DialOption -func configFromFile(fpath string) (*Config, error) { - b, err := ioutil.ReadFile(fpath) - if err != nil { - return nil, err - } - - yc := &yamlConfig{} - - err = yaml.Unmarshal(b, yc) - if err != nil { - return nil, err - } - - cfg := &Config{ - Endpoints: yc.Endpoints, - AutoSyncInterval: yc.AutoSyncInterval, - DialTimeout: yc.DialTimeout, - } - - if yc.InsecureTransport { - cfg.TLS = nil - return cfg, nil - } - - var ( - cert *tls.Certificate - cp *x509.CertPool - ) - - if yc.Certfile != "" && yc.Keyfile != "" { - cert, err = tlsutil.NewCert(yc.Certfile, yc.Keyfile, nil) - if err != nil { - return nil, err - } - } - - if yc.CAfile != "" { - cp, err = tlsutil.NewCertPool([]string{yc.CAfile}) - if err != nil { - return nil, err - } - } - - tlscfg := &tls.Config{ - MinVersion: tls.VersionTLS10, - InsecureSkipVerify: yc.InsecureSkipTLSVerify, - RootCAs: cp, - } - if cert != nil { - tlscfg.Certificates = []tls.Certificate{*cert} - } - cfg.TLS = tlscfg - - return cfg, nil + // Context is the default client context; it can be used to cancel grpc dial out and + // other operations that do not have an explicit context. + Context context.Context } diff --git a/vendor/github.com/coreos/etcd/clientv3/doc.go b/vendor/github.com/coreos/etcd/clientv3/doc.go index 470ca4dc4769..dacc5bb346fc 100644 --- a/vendor/github.com/coreos/etcd/clientv3/doc.go +++ b/vendor/github.com/coreos/etcd/clientv3/doc.go @@ -28,7 +28,7 @@ // Make sure to close the client after using it. If the client is not closed, the // connection will have leaky goroutines. // -// To specify client request timeout, pass context.WithTimeout to APIs: +// To specify a client request timeout, wrap the context with context.WithTimeout: // // ctx, cancel := context.WithTimeout(context.Background(), timeout) // resp, err := kvc.Put(ctx, "sample_key", "sample_value") diff --git a/vendor/github.com/coreos/etcd/clientv3/grpc_options.go b/vendor/github.com/coreos/etcd/clientv3/grpc_options.go new file mode 100644 index 000000000000..592dd6993cf6 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/grpc_options.go @@ -0,0 +1,46 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + "math" + + "google.golang.org/grpc" +) + +var ( + // Disable gRPC internal retrial logic + // TODO: enable when gRPC retry is stable (FailFast=false) + // Reference: + // - https://github.com/grpc/grpc-go/issues/1532 + // - https://github.com/grpc/proposal/blob/master/A6-client-retries.md + defaultFailFast = grpc.FailFast(true) + + // client-side request send limit, gRPC default is math.MaxInt32 + // Make sure that "client-side send limit < server-side default send/recv limit" + // Same value as "embed.DefaultMaxRequestBytes" plus gRPC overhead bytes + defaultMaxCallSendMsgSize = grpc.MaxCallSendMsgSize(2 * 1024 * 1024) + + // client-side response receive limit, gRPC default is 4MB + // Make sure that "client-side receive limit >= server-side default send/recv limit" + // because range response can easily exceed request send limits + // Default to math.MaxInt32; writes exceeding server-side send limit fails anyway + defaultMaxCallRecvMsgSize = grpc.MaxCallRecvMsgSize(math.MaxInt32) +) + +// defaultCallOpts defines a list of default "gRPC.CallOption". +// Some options are exposed to "clientv3.Config". +// Defaults will be overridden by the settings in "clientv3.Config". +var defaultCallOpts = []grpc.CallOption{defaultFailFast, defaultMaxCallSendMsgSize, defaultMaxCallRecvMsgSize} diff --git a/vendor/github.com/coreos/etcd/clientv3/health_balancer.go b/vendor/github.com/coreos/etcd/clientv3/health_balancer.go new file mode 100644 index 000000000000..52bea90e66ed --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/health_balancer.go @@ -0,0 +1,627 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import ( + "errors" + "net/url" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + healthpb "google.golang.org/grpc/health/grpc_health_v1" + "google.golang.org/grpc/status" +) + +const ( + minHealthRetryDuration = 3 * time.Second + unknownService = "unknown service grpc.health.v1.Health" +) + +// ErrNoAddrAvilable is returned by Get() when the balancer does not have +// any active connection to endpoints at the time. +// This error is returned only when opts.BlockingWait is true. 
+var ErrNoAddrAvilable = status.Error(codes.Unavailable, "there is no address available") + +type healthCheckFunc func(ep string) (bool, error) + +type notifyMsg int + +const ( + notifyReset notifyMsg = iota + notifyNext +) + +// healthBalancer does the bare minimum to expose multiple eps +// to the grpc reconnection code path +type healthBalancer struct { + // addrs are the client's endpoint addresses for grpc + addrs []grpc.Address + + // eps holds the raw endpoints from the client + eps []string + + // notifyCh notifies grpc of the set of addresses for connecting + notifyCh chan []grpc.Address + + // readyc closes once the first connection is up + readyc chan struct{} + readyOnce sync.Once + + // healthCheck checks an endpoint's health. + healthCheck healthCheckFunc + healthCheckTimeout time.Duration + + unhealthyMu sync.RWMutex + unhealthyHostPorts map[string]time.Time + + // mu protects all fields below. + mu sync.RWMutex + + // upc closes when pinAddr transitions from empty to non-empty or the balancer closes. + upc chan struct{} + + // downc closes when grpc calls down() on pinAddr + downc chan struct{} + + // stopc is closed to signal updateNotifyLoop should stop. + stopc chan struct{} + stopOnce sync.Once + wg sync.WaitGroup + + // donec closes when all goroutines are exited + donec chan struct{} + + // updateAddrsC notifies updateNotifyLoop to update addrs. + updateAddrsC chan notifyMsg + + // grpc issues TLS cert checks using the string passed into dial so + // that string must be the host. To recover the full scheme://host URL, + // have a map from hosts to the original endpoint. + hostPort2ep map[string]string + + // pinAddr is the currently pinned address; set to the empty string on + // initialization and shutdown. + pinAddr string + + closed bool +} + +func newHealthBalancer(eps []string, timeout time.Duration, hc healthCheckFunc) *healthBalancer { + notifyCh := make(chan []grpc.Address) + addrs := eps2addrs(eps) + hb := &healthBalancer{ + addrs: addrs, + eps: eps, + notifyCh: notifyCh, + readyc: make(chan struct{}), + healthCheck: hc, + unhealthyHostPorts: make(map[string]time.Time), + upc: make(chan struct{}), + stopc: make(chan struct{}), + downc: make(chan struct{}), + donec: make(chan struct{}), + updateAddrsC: make(chan notifyMsg), + hostPort2ep: getHostPort2ep(eps), + } + if timeout < minHealthRetryDuration { + timeout = minHealthRetryDuration + } + hb.healthCheckTimeout = timeout + + close(hb.downc) + go hb.updateNotifyLoop() + hb.wg.Add(1) + go func() { + defer hb.wg.Done() + hb.updateUnhealthy() + }() + return hb +} + +func (b *healthBalancer) Start(target string, config grpc.BalancerConfig) error { return nil } + +func (b *healthBalancer) ConnectNotify() <-chan struct{} { + b.mu.Lock() + defer b.mu.Unlock() + return b.upc +} + +func (b *healthBalancer) ready() <-chan struct{} { return b.readyc } + +func (b *healthBalancer) endpoint(hostPort string) string { + b.mu.RLock() + defer b.mu.RUnlock() + return b.hostPort2ep[hostPort] +} + +func (b *healthBalancer) pinned() string { + b.mu.RLock() + defer b.mu.RUnlock() + return b.pinAddr +} + +func (b *healthBalancer) hostPortError(hostPort string, err error) { + if b.endpoint(hostPort) == "" { + if logger.V(4) { + logger.Infof("clientv3/balancer: %q is stale (skip marking as unhealthy on %q)", hostPort, err.Error()) + } + return + } + + b.unhealthyMu.Lock() + b.unhealthyHostPorts[hostPort] = time.Now() + b.unhealthyMu.Unlock() + if logger.V(4) { + logger.Infof("clientv3/balancer: %q is marked unhealthy (%q)", hostPort, 
err.Error()) + } +} + +func (b *healthBalancer) removeUnhealthy(hostPort, msg string) { + if b.endpoint(hostPort) == "" { + if logger.V(4) { + logger.Infof("clientv3/balancer: %q was not in unhealthy (%q)", hostPort, msg) + } + return + } + + b.unhealthyMu.Lock() + delete(b.unhealthyHostPorts, hostPort) + b.unhealthyMu.Unlock() + if logger.V(4) { + logger.Infof("clientv3/balancer: %q is removed from unhealthy (%q)", hostPort, msg) + } +} + +func (b *healthBalancer) countUnhealthy() (count int) { + b.unhealthyMu.RLock() + count = len(b.unhealthyHostPorts) + b.unhealthyMu.RUnlock() + return count +} + +func (b *healthBalancer) isUnhealthy(hostPort string) (unhealthy bool) { + b.unhealthyMu.RLock() + _, unhealthy = b.unhealthyHostPorts[hostPort] + b.unhealthyMu.RUnlock() + return unhealthy +} + +func (b *healthBalancer) cleanupUnhealthy() { + b.unhealthyMu.Lock() + for k, v := range b.unhealthyHostPorts { + if time.Since(v) > b.healthCheckTimeout { + delete(b.unhealthyHostPorts, k) + if logger.V(4) { + logger.Infof("clientv3/balancer: removed %q from unhealthy after %v", k, b.healthCheckTimeout) + } + } + } + b.unhealthyMu.Unlock() +} + +func (b *healthBalancer) liveAddrs() ([]grpc.Address, map[string]struct{}) { + unhealthyCnt := b.countUnhealthy() + + b.mu.RLock() + defer b.mu.RUnlock() + + hbAddrs := b.addrs + if len(b.addrs) == 1 || unhealthyCnt == 0 || unhealthyCnt == len(b.addrs) { + liveHostPorts := make(map[string]struct{}, len(b.hostPort2ep)) + for k := range b.hostPort2ep { + liveHostPorts[k] = struct{}{} + } + return hbAddrs, liveHostPorts + } + + addrs := make([]grpc.Address, 0, len(b.addrs)-unhealthyCnt) + liveHostPorts := make(map[string]struct{}, len(addrs)) + for _, addr := range b.addrs { + if !b.isUnhealthy(addr.Addr) { + addrs = append(addrs, addr) + liveHostPorts[addr.Addr] = struct{}{} + } + } + return addrs, liveHostPorts +} + +func (b *healthBalancer) updateUnhealthy() { + for { + select { + case <-time.After(b.healthCheckTimeout): + b.cleanupUnhealthy() + pinned := b.pinned() + if pinned == "" || b.isUnhealthy(pinned) { + select { + case b.updateAddrsC <- notifyNext: + case <-b.stopc: + return + } + } + case <-b.stopc: + return + } + } +} + +func (b *healthBalancer) updateAddrs(eps ...string) { + np := getHostPort2ep(eps) + + b.mu.Lock() + defer b.mu.Unlock() + + match := len(np) == len(b.hostPort2ep) + if match { + for k, v := range np { + if b.hostPort2ep[k] != v { + match = false + break + } + } + } + if match { + // same endpoints, so no need to update address + return + } + + b.hostPort2ep = np + b.addrs, b.eps = eps2addrs(eps), eps + + b.unhealthyMu.Lock() + b.unhealthyHostPorts = make(map[string]time.Time) + b.unhealthyMu.Unlock() +} + +func (b *healthBalancer) next() { + b.mu.RLock() + downc := b.downc + b.mu.RUnlock() + select { + case b.updateAddrsC <- notifyNext: + case <-b.stopc: + } + // wait until disconnect so new RPCs are not issued on old connection + select { + case <-downc: + case <-b.stopc: + } +} + +func (b *healthBalancer) updateNotifyLoop() { + defer close(b.donec) + + for { + b.mu.RLock() + upc, downc, addr := b.upc, b.downc, b.pinAddr + b.mu.RUnlock() + // downc or upc should be closed + select { + case <-downc: + downc = nil + default: + } + select { + case <-upc: + upc = nil + default: + } + switch { + case downc == nil && upc == nil: + // stale + select { + case <-b.stopc: + return + default: + } + case downc == nil: + b.notifyAddrs(notifyReset) + select { + case <-upc: + case msg := <-b.updateAddrsC: + b.notifyAddrs(msg) + case <-b.stopc: + 
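+ // Close() was called: stop renotifying addresses and exit the loop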
return + } + case upc == nil: + select { + // close connections that are not the pinned address + case b.notifyCh <- []grpc.Address{{Addr: addr}}: + case <-downc: + case <-b.stopc: + return + } + select { + case <-downc: + b.notifyAddrs(notifyReset) + case msg := <-b.updateAddrsC: + b.notifyAddrs(msg) + case <-b.stopc: + return + } + } + } +} + +func (b *healthBalancer) notifyAddrs(msg notifyMsg) { + if msg == notifyNext { + select { + case b.notifyCh <- []grpc.Address{}: + case <-b.stopc: + return + } + } + b.mu.RLock() + pinAddr := b.pinAddr + downc := b.downc + b.mu.RUnlock() + addrs, hostPorts := b.liveAddrs() + + var waitDown bool + if pinAddr != "" { + _, ok := hostPorts[pinAddr] + waitDown = !ok + } + + select { + case b.notifyCh <- addrs: + if waitDown { + select { + case <-downc: + case <-b.stopc: + } + } + case <-b.stopc: + } +} + +func (b *healthBalancer) Up(addr grpc.Address) func(error) { + if !b.mayPin(addr) { + return func(err error) {} + } + + b.mu.Lock() + defer b.mu.Unlock() + + // gRPC might call Up after it called Close. We add this check + // to "fix" it up at the application layer; otherwise, it will panic + // if b.upc is already closed. + if b.closed { + return func(err error) {} + } + + // gRPC might call Up on a stale address. + // Prevent updating pinAddr with a stale address. + if !hasAddr(b.addrs, addr.Addr) { + return func(err error) {} + } + + if b.pinAddr != "" { + if logger.V(4) { + logger.Infof("clientv3/balancer: %q is up but not pinned (already pinned %q)", addr.Addr, b.pinAddr) + } + return func(err error) {} + } + + // notify waiting Get()s and pin the first connected address + close(b.upc) + b.downc = make(chan struct{}) + b.pinAddr = addr.Addr + if logger.V(4) { + logger.Infof("clientv3/balancer: pin %q", addr.Addr) + } + + // notify the client that a connection is up + b.readyOnce.Do(func() { close(b.readyc) }) + + return func(err error) { + // If connected to a black hole endpoint or a killed server, the gRPC ping + // timeout will induce a network I/O error and the request is retried until it + // succeeds; finding a healthy endpoint on retry could take several timeouts + // and redials. To avoid wasting retries, gray-list unhealthy endpoints. + b.hostPortError(addr.Addr, err) + + b.mu.Lock() + b.upc = make(chan struct{}) + close(b.downc) + b.pinAddr = "" + b.mu.Unlock() + if logger.V(4) { + logger.Infof("clientv3/balancer: unpin %q (%q)", addr.Addr, err.Error()) + } + } +} + +func (b *healthBalancer) mayPin(addr grpc.Address) bool { + if b.endpoint(addr.Addr) == "" { // stale host:port + return false + } + + b.unhealthyMu.RLock() + unhealthyCnt := len(b.unhealthyHostPorts) + failedTime, bad := b.unhealthyHostPorts[addr.Addr] + b.unhealthyMu.RUnlock() + + b.mu.RLock() + skip := len(b.addrs) == 1 || unhealthyCnt == 0 || len(b.addrs) == unhealthyCnt + b.mu.RUnlock() + if skip || !bad { + return true + } + + // prevent an isolated member's endpoint from being retried indefinitely, as follows: + // 1. keepalive pings detect GoAway with http2.ErrCodeEnhanceYourCalm + // 2. balancer 'Up' unpins with grpc: failed with network I/O error + // 3. the gRPC health check still reports SERVING, so the balancer retries the pin + // instead, return before the health check if the endpoint failed within the health-check timeout + if elapsed := time.Since(failedTime); elapsed < b.healthCheckTimeout { + if logger.V(4) { + logger.Infof("clientv3/balancer: %q is up but not pinned (failed %v ago, require minimum %v after failure)", addr.Addr, elapsed, b.healthCheckTimeout) + } + return false + } + + if ok, _ := b.healthCheck(addr.Addr); ok { + b.removeUnhealthy(addr.Addr, "health check success") + return true + } + + b.hostPortError(addr.Addr, errors.New("health check failed")) + return false +} + +func (b *healthBalancer) Get(ctx context.Context, opts grpc.BalancerGetOptions) (grpc.Address, func(), error) { + var ( + addr string + closed bool + ) + + // If opts.BlockingWait is false (for fail-fast RPCs), it should immediately + // return an address it has already notified via Notify, instead of blocking. + if !opts.BlockingWait { + b.mu.RLock() + closed = b.closed + addr = b.pinAddr + b.mu.RUnlock() + if closed { + return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing + } + if addr == "" { + return grpc.Address{Addr: ""}, nil, ErrNoAddrAvilable + } + return grpc.Address{Addr: addr}, func() {}, nil + } + + for { + b.mu.RLock() + ch := b.upc + b.mu.RUnlock() + select { + case <-ch: + case <-b.donec: + return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing + case <-ctx.Done(): + return grpc.Address{Addr: ""}, nil, ctx.Err() + } + b.mu.RLock() + closed = b.closed + addr = b.pinAddr + b.mu.RUnlock() + // Close(), which sets b.closed = true, can be called before Get(); Get() must exit if the balancer is closed. + if closed { + return grpc.Address{Addr: ""}, nil, grpc.ErrClientConnClosing + } + if addr != "" { + break + } + } + return grpc.Address{Addr: addr}, func() {}, nil +} + +func (b *healthBalancer) Notify() <-chan []grpc.Address { return b.notifyCh } + +func (b *healthBalancer) Close() error { + b.mu.Lock() + // In case gRPC calls close twice. TODO: remove the check + // when we are sure that gRPC won't call close twice. + if b.closed { + b.mu.Unlock() + <-b.donec + return nil + } + b.closed = true + b.stopOnce.Do(func() { close(b.stopc) }) + b.pinAddr = "" + + // In the case of the following scenario: + // 1. upc is not closed; no pinned address + // 2. client issues an RPC, calling invoke(), which calls Get(), enters for loop, blocks + // 3. client.conn.Close() calls balancer.Close(); closed = true + // 4.
for loop in Get() never exits since ctx is the context passed in by the client and may not be canceled + // we must close upc so Get() exits from blocking on upc + select { + case <-b.upc: + default: + // terminate all waiting Get()s + close(b.upc) + } + + b.mu.Unlock() + b.wg.Wait() + + // wait for updateNotifyLoop to finish + <-b.donec + close(b.notifyCh) + + return nil +} + +func grpcHealthCheck(client *Client, ep string) (bool, error) { + conn, err := client.dial(ep) + if err != nil { + return false, err + } + defer conn.Close() + cli := healthpb.NewHealthClient(conn) + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + resp, err := cli.Check(ctx, &healthpb.HealthCheckRequest{}) + cancel() + if err != nil { + if s, ok := status.FromError(err); ok && s.Code() == codes.Unavailable { + if s.Message() == unknownService { // etcd < v3.3.0 + return true, nil + } + } + return false, err + } + return resp.Status == healthpb.HealthCheckResponse_SERVING, nil +} + +func hasAddr(addrs []grpc.Address, targetAddr string) bool { + for _, addr := range addrs { + if targetAddr == addr.Addr { + return true + } + } + return false +} + +func getHost(ep string) string { + url, uerr := url.Parse(ep) + if uerr != nil || !strings.Contains(ep, "://") { + return ep + } + return url.Host +} + +func eps2addrs(eps []string) []grpc.Address { + addrs := make([]grpc.Address, len(eps)) + for i := range eps { + addrs[i].Addr = getHost(eps[i]) + } + return addrs +} + +func getHostPort2ep(eps []string) map[string]string { + hm := make(map[string]string, len(eps)) + for i := range eps { + _, host, _ := parseEndpoint(eps[i]) + hm[host] = eps[i] + } + return hm +} diff --git a/vendor/github.com/coreos/etcd/clientv3/kv.go b/vendor/github.com/coreos/etcd/clientv3/kv.go index c8350f9268b2..6289605c8e05 100644 --- a/vendor/github.com/coreos/etcd/clientv3/kv.go +++ b/vendor/github.com/coreos/etcd/clientv3/kv.go @@ -16,6 +16,7 @@ package clientv3 import ( pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "golang.org/x/net/context" "google.golang.org/grpc" ) @@ -32,7 +33,7 @@ type KV interface { // Put puts a key-value pair into etcd. // Note that key,value can be plain bytes array and string is // an immutable representation of that bytes array. - // To get a string of bytes, do string([]byte(0x10, 0x20)). + // To get a string of bytes, do string([]byte{0x10, 0x20}). Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) // Get retrieves keys. @@ -51,11 +52,6 @@ type KV interface { // Compact compacts etcd KV history before the given rev. Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) - // Do applies a single Op on KV without a transaction. - // Do is useful when declaring operations to be issued at a later time - // whereas Get/Put/Delete are for better suited for when the operation - // should be immediately issued at time of declaration. - // Do applies a single Op on KV without a transaction. 
// Do is useful when creating arbitrary operations to be issued at a // later time; the user can range over the operations, calling Do to @@ -71,22 +67,46 @@ type OpResponse struct { put *PutResponse get *GetResponse del *DeleteResponse + txn *TxnResponse } func (op OpResponse) Put() *PutResponse { return op.put } func (op OpResponse) Get() *GetResponse { return op.get } func (op OpResponse) Del() *DeleteResponse { return op.del } +func (op OpResponse) Txn() *TxnResponse { return op.txn } + +func (resp *PutResponse) OpResponse() OpResponse { + return OpResponse{put: resp} +} +func (resp *GetResponse) OpResponse() OpResponse { + return OpResponse{get: resp} +} +func (resp *DeleteResponse) OpResponse() OpResponse { + return OpResponse{del: resp} +} +func (resp *TxnResponse) OpResponse() OpResponse { + return OpResponse{txn: resp} +} type kv struct { - remote pb.KVClient + remote pb.KVClient + callOpts []grpc.CallOption } func NewKV(c *Client) KV { - return &kv{remote: RetryKVClient(c)} + api := &kv{remote: RetryKVClient(c)} + if c != nil { + api.callOpts = c.callOpts + } + return api } -func NewKVFromKVClient(remote pb.KVClient) KV { - return &kv{remote: remote} +func NewKVFromKVClient(remote pb.KVClient, c *Client) KV { + api := &kv{remote: remote} + if c != nil { + api.callOpts = c.callOpts + } + return api } func (kv *kv) Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) { @@ -105,7 +125,7 @@ func (kv *kv) Delete(ctx context.Context, key string, opts ...OpOption) (*Delete } func (kv *kv) Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) { - resp, err := kv.remote.Compact(ctx, OpCompact(rev, opts...).toRequest()) + resp, err := kv.remote.Compact(ctx, OpCompact(rev, opts...).toRequest(), kv.callOpts...) if err != nil { return nil, toErr(ctx, err) } @@ -114,54 +134,43 @@ func (kv *kv) Compact(ctx context.Context, rev int64, opts ...CompactOption) (*C func (kv *kv) Txn(ctx context.Context) Txn { return &txn{ - kv: kv, - ctx: ctx, + kv: kv, + ctx: ctx, + callOpts: kv.callOpts, } } func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) { - for { - resp, err := kv.do(ctx, op) - if err == nil { - return resp, nil - } - - if isHaltErr(ctx, err) { - return resp, toErr(ctx, err) - } - // do not retry on modifications - if op.isWrite() { - return resp, toErr(ctx, err) - } - } -} - -func (kv *kv) do(ctx context.Context, op Op) (OpResponse, error) { var err error switch op.t { - // TODO: handle other ops case tRange: var resp *pb.RangeResponse - resp, err = kv.remote.Range(ctx, op.toRangeRequest(), grpc.FailFast(false)) + resp, err = kv.remote.Range(ctx, op.toRangeRequest(), kv.callOpts...) if err == nil { return OpResponse{get: (*GetResponse)(resp)}, nil } case tPut: var resp *pb.PutResponse - r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV} - resp, err = kv.remote.Put(ctx, r) + r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease} + resp, err = kv.remote.Put(ctx, r, kv.callOpts...) if err == nil { return OpResponse{put: (*PutResponse)(resp)}, nil } case tDeleteRange: var resp *pb.DeleteRangeResponse r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV} - resp, err = kv.remote.DeleteRange(ctx, r) + resp, err = kv.remote.DeleteRange(ctx, r, kv.callOpts...) 
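+ // as with Range and Put above, a failed call falls through to the shared + // toErr conversion below; the old per-call retry loop is gone because + // kv.remote is already wrapped by RetryKVClient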
if err == nil { return OpResponse{del: (*DeleteResponse)(resp)}, nil } + case tTxn: + var resp *pb.TxnResponse + resp, err = kv.remote.Txn(ctx, op.toTxnRequest(), kv.callOpts...) + if err == nil { + return OpResponse{txn: (*TxnResponse)(resp)}, nil + } default: panic("Unknown op") } - return OpResponse{}, err + return OpResponse{}, toErr(ctx, err) } diff --git a/vendor/github.com/coreos/etcd/clientv3/lease.go b/vendor/github.com/coreos/etcd/clientv3/lease.go index 10d3dd0b27f8..e74e1d6b549f 100644 --- a/vendor/github.com/coreos/etcd/clientv3/lease.go +++ b/vendor/github.com/coreos/etcd/clientv3/lease.go @@ -20,8 +20,10 @@ import ( "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "golang.org/x/net/context" "google.golang.org/grpc" + "google.golang.org/grpc/metadata" ) type ( @@ -29,7 +31,7 @@ type ( LeaseID int64 ) -// LeaseGrantResponse is used to convert the protobuf grant response. +// LeaseGrantResponse wraps the protobuf message LeaseGrantResponse. type LeaseGrantResponse struct { *pb.ResponseHeader ID LeaseID @@ -37,14 +39,14 @@ type LeaseGrantResponse struct { Error string } -// LeaseKeepAliveResponse is used to convert the protobuf keepalive response. +// LeaseKeepAliveResponse wraps the protobuf message LeaseKeepAliveResponse. type LeaseKeepAliveResponse struct { *pb.ResponseHeader ID LeaseID TTL int64 } -// LeaseTimeToLiveResponse is used to convert the protobuf lease timetolive response. +// LeaseTimeToLiveResponse wraps the protobuf message LeaseTimeToLiveResponse. type LeaseTimeToLiveResponse struct { *pb.ResponseHeader ID LeaseID `json:"id"` @@ -59,6 +61,12 @@ type LeaseTimeToLiveResponse struct { Keys [][]byte `json:"keys"` } +// LeaseStatus represents a lease status. +type LeaseStatus struct { + ID LeaseID `json:"id"` + // TODO: TTL int64 +} + const ( // defaultTTL is the assumed lease TTL used for the first keepalive // deadline before the actual TTL is known to the client. @@ -67,6 +75,9 @@ const ( leaseResponseChSize = 16 // NoLease is a lease ID for the absence of a lease. NoLease LeaseID = 0 + + // retryConnWait is how long to wait before retrying request due to an error + retryConnWait = 500 * time.Millisecond ) // ErrKeepAliveHalted is returned if client keep alive loop halts with an unexpected error. @@ -97,7 +108,7 @@ type Lease interface { // KeepAlive keeps the given lease alive forever. KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) - // KeepAliveOnce renews the lease once. In most of the cases, Keepalive + // KeepAliveOnce renews the lease once. In most of the cases, KeepAlive // should be used instead of KeepAliveOnce. KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) @@ -126,6 +137,11 @@ type lessor struct { // firstKeepAliveTimeout is the timeout for the first keepalive request // before the actual TTL is known to the lease client firstKeepAliveTimeout time.Duration + + // firstKeepAliveOnce ensures stream starts after first KeepAlive call. 
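+ // (recvKeepAliveLoop and deadlineLoop are started lazily by the first + // KeepAlive call, so an idle lessor spawns no stream goroutines.)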
+ firstKeepAliveOnce sync.Once + + callOpts []grpc.CallOption } // keepAlive multiplexes a keepalive for a lease over multiple channels @@ -141,85 +157,65 @@ type keepAlive struct { } func NewLease(c *Client) Lease { + return NewLeaseFromLeaseClient(RetryLeaseClient(c), c, c.cfg.DialTimeout+time.Second) +} + +func NewLeaseFromLeaseClient(remote pb.LeaseClient, c *Client, keepAliveTimeout time.Duration) Lease { l := &lessor{ donec: make(chan struct{}), keepAlives: make(map[LeaseID]*keepAlive), - remote: RetryLeaseClient(c), - firstKeepAliveTimeout: c.cfg.DialTimeout + time.Second, + remote: remote, + firstKeepAliveTimeout: keepAliveTimeout, } if l.firstKeepAliveTimeout == time.Second { l.firstKeepAliveTimeout = defaultTTL } - - l.stopCtx, l.stopCancel = context.WithCancel(context.Background()) - go l.recvKeepAliveLoop() - go l.deadlineLoop() + if c != nil { + l.callOpts = c.callOpts + } + reqLeaderCtx := WithRequireLeader(context.Background()) + l.stopCtx, l.stopCancel = context.WithCancel(reqLeaderCtx) return l } func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) { - cctx, cancel := context.WithCancel(ctx) - done := cancelWhenStop(cancel, l.stopCtx.Done()) - defer close(done) - - for { - r := &pb.LeaseGrantRequest{TTL: ttl} - resp, err := l.remote.LeaseGrant(cctx, r) - if err == nil { - gresp := &LeaseGrantResponse{ - ResponseHeader: resp.GetHeader(), - ID: LeaseID(resp.ID), - TTL: resp.TTL, - Error: resp.Error, - } - return gresp, nil - } - if isHaltErr(cctx, err) { - return nil, toErr(cctx, err) + r := &pb.LeaseGrantRequest{TTL: ttl} + resp, err := l.remote.LeaseGrant(ctx, r, l.callOpts...) + if err == nil { + gresp := &LeaseGrantResponse{ + ResponseHeader: resp.GetHeader(), + ID: LeaseID(resp.ID), + TTL: resp.TTL, + Error: resp.Error, } + return gresp, nil } + return nil, toErr(ctx, err) } func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) { - cctx, cancel := context.WithCancel(ctx) - done := cancelWhenStop(cancel, l.stopCtx.Done()) - defer close(done) - - for { - r := &pb.LeaseRevokeRequest{ID: int64(id)} - resp, err := l.remote.LeaseRevoke(cctx, r) - - if err == nil { - return (*LeaseRevokeResponse)(resp), nil - } - if isHaltErr(ctx, err) { - return nil, toErr(ctx, err) - } + r := &pb.LeaseRevokeRequest{ID: int64(id)} + resp, err := l.remote.LeaseRevoke(ctx, r, l.callOpts...) + if err == nil { + return (*LeaseRevokeResponse)(resp), nil } + return nil, toErr(ctx, err) } func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) { - cctx, cancel := context.WithCancel(ctx) - done := cancelWhenStop(cancel, l.stopCtx.Done()) - defer close(done) - - for { - r := toLeaseTimeToLiveRequest(id, opts...) - resp, err := l.remote.LeaseTimeToLive(cctx, r, grpc.FailFast(false)) - if err == nil { - gresp := &LeaseTimeToLiveResponse{ - ResponseHeader: resp.GetHeader(), - ID: LeaseID(resp.ID), - TTL: resp.TTL, - GrantedTTL: resp.GrantedTTL, - Keys: resp.Keys, - } - return gresp, nil - } - if isHaltErr(cctx, err) { - return nil, toErr(cctx, err) + r := toLeaseTimeToLiveRequest(id, opts...) + resp, err := l.remote.LeaseTimeToLive(ctx, r, l.callOpts...) 
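+ // a single attempt suffices here: l.remote comes from RetryLeaseClient, + // which already repeats the call on retriable errors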
+ if err == nil { + gresp := &LeaseTimeToLiveResponse{ + ResponseHeader: resp.GetHeader(), + ID: LeaseID(resp.ID), + TTL: resp.TTL, + GrantedTTL: resp.GrantedTTL, + Keys: resp.Keys, } + return gresp, nil } + return nil, toErr(ctx, err) } func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) { @@ -254,19 +250,19 @@ func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAl l.mu.Unlock() go l.keepAliveCtxCloser(id, ctx, ka.donec) + l.firstKeepAliveOnce.Do(func() { + go l.recvKeepAliveLoop() + go l.deadlineLoop() + }) return ch, nil } func (l *lessor) KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) { - cctx, cancel := context.WithCancel(ctx) - done := cancelWhenStop(cancel, l.stopCtx.Done()) - defer close(done) - for { - resp, err := l.keepAliveOnce(cctx, id) + resp, err := l.keepAliveOnce(ctx, id) if err == nil { - if resp.TTL == 0 { + if resp.TTL <= 0 { err = rpctypes.ErrLeaseNotFound } return resp, err @@ -279,6 +275,8 @@ func (l *lessor) KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAlive func (l *lessor) Close() error { l.stopCancel() + // close for synchronous teardown if stream goroutines never launched + l.firstKeepAliveOnce.Do(func() { close(l.donec) }) <-l.donec return nil } @@ -315,11 +313,50 @@ func (l *lessor) keepAliveCtxCloser(id LeaseID, ctx context.Context, donec <-cha } } +// closeRequireLeader scans keepAlives for ctxs that have require leader +// and closes the associated channels. +func (l *lessor) closeRequireLeader() { + l.mu.Lock() + defer l.mu.Unlock() + for _, ka := range l.keepAlives { + reqIdxs := 0 + // find all required leader channels, close, mark as nil + for i, ctx := range ka.ctxs { + md, ok := metadata.FromOutgoingContext(ctx) + if !ok { + continue + } + ks := md[rpctypes.MetadataRequireLeaderKey] + if len(ks) < 1 || ks[0] != rpctypes.MetadataHasLeader { + continue + } + close(ka.chs[i]) + ka.chs[i] = nil + reqIdxs++ + } + if reqIdxs == 0 { + continue + } + // remove all channels that required a leader from keepalive + newChs := make([]chan<- *LeaseKeepAliveResponse, len(ka.chs)-reqIdxs) + newCtxs := make([]context.Context, len(newChs)) + newIdx := 0 + for i := range ka.chs { + if ka.chs[i] == nil { + continue + } + newChs[newIdx], newCtxs[newIdx] = ka.chs[i], ka.ctxs[newIdx] + newIdx++ + } + ka.chs, ka.ctxs = newChs, newCtxs + } +} + func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) { cctx, cancel := context.WithCancel(ctx) defer cancel() - stream, err := l.remote.LeaseKeepAlive(cctx, grpc.FailFast(false)) + stream, err := l.remote.LeaseKeepAlive(cctx, l.callOpts...) 
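+ // the stream is scoped to cctx, so the deferred cancel tears it down when + // this one-shot keepalive returns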
if err != nil { return nil, toErr(ctx, err) } @@ -348,32 +385,50 @@ func (l *lessor) recvKeepAliveLoop() (gerr error) { close(l.donec) l.loopErr = gerr for _, ka := range l.keepAlives { - ka.Close() + ka.close() } l.keepAlives = make(map[LeaseID]*keepAlive) l.mu.Unlock() }() - stream, serr := l.resetRecv() - for serr == nil { - resp, err := stream.Recv() + for { + stream, err := l.resetRecv() if err != nil { - if isHaltErr(l.stopCtx, err) { + if canceledByCaller(l.stopCtx, err) { return err } - stream, serr = l.resetRecv() + } else { + for { + resp, err := stream.Recv() + if err != nil { + if canceledByCaller(l.stopCtx, err) { + return err + } + + if toErr(l.stopCtx, err) == rpctypes.ErrNoLeader { + l.closeRequireLeader() + } + break + } + + l.recvKeepAlive(resp) + } + } + + select { + case <-time.After(retryConnWait): continue + case <-l.stopCtx.Done(): + return l.stopCtx.Err() } - l.recvKeepAlive(resp) } - return serr } -// resetRecv opens a new lease stream and starts sending LeaseKeepAliveRequests +// resetRecv opens a new lease stream and starts sending keep alive requests. func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) { sctx, cancel := context.WithCancel(l.stopCtx) - stream, err := l.remote.LeaseKeepAlive(sctx, grpc.FailFast(false)) - if err = toErr(sctx, err); err != nil { + stream, err := l.remote.LeaseKeepAlive(sctx, l.callOpts...) + if err != nil { cancel() return nil, err } @@ -381,7 +436,6 @@ func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) { l.mu.Lock() defer l.mu.Unlock() if l.stream != nil && l.streamCancel != nil { - l.stream.CloseSend() l.streamCancel() } @@ -411,7 +465,7 @@ func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) { if karesp.TTL <= 0 { // lease expired; close all keep alive channels delete(l.keepAlives, karesp.ID) - ka.Close() + ka.close() return } @@ -441,7 +495,7 @@ func (l *lessor) deadlineLoop() { for id, ka := range l.keepAlives { if ka.deadline.Before(now) { // waited too long for response; lease may be expired - ka.Close() + ka.close() delete(l.keepAlives, id) } } @@ -449,19 +503,9 @@ func (l *lessor) deadlineLoop() { } } -// sendKeepAliveLoop sends LeaseKeepAliveRequests for the lifetime of a lease stream +// sendKeepAliveLoop sends keep alive requests for the lifetime of the given stream. func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) { for { - select { - case <-time.After(500 * time.Millisecond): - case <-stream.Context().Done(): - return - case <-l.donec: - return - case <-l.stopCtx.Done(): - return - } - var tosend []LeaseID now := time.Now() @@ -480,29 +524,22 @@ func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) { return } } + + select { + case <-time.After(500 * time.Millisecond): + case <-stream.Context().Done(): + return + case <-l.donec: + return + case <-l.stopCtx.Done(): + return + } } } -func (ka *keepAlive) Close() { +func (ka *keepAlive) close() { close(ka.donec) for _, ch := range ka.chs { close(ch) } } - -// cancelWhenStop calls cancel when the given stopc fires. It returns a done chan. done -// should be closed when the work is finished. When done fires, cancelWhenStop will release -// its internal resource. 
-func cancelWhenStop(cancel context.CancelFunc, stopc <-chan struct{}) chan<- struct{} { - done := make(chan struct{}, 1) - - go func() { - select { - case <-stopc: - case <-done: - } - cancel() - }() - - return done -} diff --git a/vendor/github.com/coreos/etcd/clientv3/logger.go b/vendor/github.com/coreos/etcd/clientv3/logger.go index 519db45d8e34..012abdbce639 100644 --- a/vendor/github.com/coreos/etcd/clientv3/logger.go +++ b/vendor/github.com/coreos/etcd/clientv3/logger.go @@ -16,36 +16,35 @@ package clientv3 import ( "io/ioutil" - "log" "sync" "google.golang.org/grpc/grpclog" ) // Logger is the logger used by client library. -// It implements grpclog.Logger interface. -type Logger grpclog.Logger +// It implements grpclog.LoggerV2 interface. +type Logger grpclog.LoggerV2 var ( logger settableLogger ) type settableLogger struct { - l grpclog.Logger + l grpclog.LoggerV2 mu sync.RWMutex } func init() { // disable client side logs by default logger.mu.Lock() - logger.l = log.New(ioutil.Discard, "", 0) + logger.l = grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, ioutil.Discard) // logger has to override the grpclog at initialization so that // any changes to the grpclog go through logger with locking // instead of through SetLogger // // now updates only happen through settableLogger.set - grpclog.SetLogger(&logger) + grpclog.SetLoggerV2(&logger) logger.mu.Unlock() } @@ -62,6 +61,7 @@ func GetLogger() Logger { func (s *settableLogger) set(l Logger) { s.mu.Lock() logger.l = l + grpclog.SetLoggerV2(&logger) s.mu.Unlock() } @@ -72,11 +72,25 @@ func (s *settableLogger) get() Logger { return l } -// implement the grpclog.Logger interface +// implement the grpclog.LoggerV2 interface +func (s *settableLogger) Info(args ...interface{}) { s.get().Info(args...) } +func (s *settableLogger) Infof(format string, args ...interface{}) { s.get().Infof(format, args...) } +func (s *settableLogger) Infoln(args ...interface{}) { s.get().Infoln(args...) } +func (s *settableLogger) Warning(args ...interface{}) { s.get().Warning(args...) } +func (s *settableLogger) Warningf(format string, args ...interface{}) { + s.get().Warningf(format, args...) +} +func (s *settableLogger) Warningln(args ...interface{}) { s.get().Warningln(args...) } +func (s *settableLogger) Error(args ...interface{}) { s.get().Error(args...) } +func (s *settableLogger) Errorf(format string, args ...interface{}) { + s.get().Errorf(format, args...) +} +func (s *settableLogger) Errorln(args ...interface{}) { s.get().Errorln(args...) } func (s *settableLogger) Fatal(args ...interface{}) { s.get().Fatal(args...) } func (s *settableLogger) Fatalf(format string, args ...interface{}) { s.get().Fatalf(format, args...) } func (s *settableLogger) Fatalln(args ...interface{}) { s.get().Fatalln(args...) } -func (s *settableLogger) Print(args ...interface{}) { s.get().Print(args...) } -func (s *settableLogger) Printf(format string, args ...interface{}) { s.get().Printf(format, args...) } -func (s *settableLogger) Println(args ...interface{}) { s.get().Println(args...) } +func (s *settableLogger) Print(args ...interface{}) { s.get().Info(args...) } +func (s *settableLogger) Printf(format string, args ...interface{}) { s.get().Infof(format, args...) } +func (s *settableLogger) Println(args ...interface{}) { s.get().Infoln(args...) 
} +func (s *settableLogger) V(l int) bool { return s.get().V(l) } diff --git a/vendor/github.com/coreos/etcd/clientv3/maintenance.go b/vendor/github.com/coreos/etcd/clientv3/maintenance.go index 718356250beb..67b928fcfb3a 100644 --- a/vendor/github.com/coreos/etcd/clientv3/maintenance.go +++ b/vendor/github.com/coreos/etcd/clientv3/maintenance.go @@ -18,6 +18,7 @@ import ( "io" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "golang.org/x/net/context" "google.golang.org/grpc" ) @@ -36,7 +37,7 @@ type Maintenance interface { // AlarmDisarm disarms a given alarm. AlarmDisarm(ctx context.Context, m *AlarmMember) (*AlarmResponse, error) - // Defragment defragments storage backend of the etcd member with given endpoint. + // Defragment releases wasted space from internal fragmentation on a given etcd member. // Defragment is only needed when deleting a large number of keys and want to reclaim // the resources. // Defragment is an expensive operation. User should avoid defragmenting multiple members @@ -48,17 +49,45 @@ type Maintenance interface { // Status gets the status of the endpoint. Status(ctx context.Context, endpoint string) (*StatusResponse, error) - // Snapshot provides a reader for a snapshot of a backend. + // Snapshot provides a reader for a point-in-time snapshot of etcd. Snapshot(ctx context.Context) (io.ReadCloser, error) } type maintenance struct { - c *Client - remote pb.MaintenanceClient + dial func(endpoint string) (pb.MaintenanceClient, func(), error) + remote pb.MaintenanceClient + callOpts []grpc.CallOption } func NewMaintenance(c *Client) Maintenance { - return &maintenance{c: c, remote: pb.NewMaintenanceClient(c.conn)} + api := &maintenance{ + dial: func(endpoint string) (pb.MaintenanceClient, func(), error) { + conn, err := c.dial(endpoint) + if err != nil { + return nil, nil, err + } + cancel := func() { conn.Close() } + return RetryMaintenanceClient(c, conn), cancel, nil + }, + remote: RetryMaintenanceClient(c, c.conn), + } + if c != nil { + api.callOpts = c.callOpts + } + return api +} + +func NewMaintenanceFromMaintenanceClient(remote pb.MaintenanceClient, c *Client) Maintenance { + api := &maintenance{ + dial: func(string) (pb.MaintenanceClient, func(), error) { + return remote, func() {}, nil + }, + remote: remote, + } + if c != nil { + api.callOpts = c.callOpts + } + return api } func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) { @@ -67,15 +96,11 @@ func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) { MemberID: 0, // all Alarm: pb.AlarmType_NONE, // all } - for { - resp, err := m.remote.Alarm(ctx, req, grpc.FailFast(false)) - if err == nil { - return (*AlarmResponse)(resp), nil - } - if isHaltErr(ctx, err) { - return nil, toErr(ctx, err) - } + resp, err := m.remote.Alarm(ctx, req, m.callOpts...) + if err == nil { + return (*AlarmResponse)(resp), nil } + return nil, toErr(ctx, err) } func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmResponse, error) { @@ -101,7 +126,7 @@ func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmR return &ret, nil } - resp, err := m.remote.Alarm(ctx, req, grpc.FailFast(false)) + resp, err := m.remote.Alarm(ctx, req, m.callOpts...) 
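+ // the retry loop that used to live here moved into RetryMaintenanceClient; + // AlarmList is read-only, so the wrapper may safely re-issue it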
if err == nil { return (*AlarmResponse)(resp), nil } @@ -109,13 +134,12 @@ } func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error) { - conn, err := m.c.Dial(endpoint) + remote, cancel, err := m.dial(endpoint) if err != nil { return nil, toErr(ctx, err) } - defer conn.Close() - remote := pb.NewMaintenanceClient(conn) - resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{}, grpc.FailFast(false)) + defer cancel() + resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{}, m.callOpts...) if err != nil { return nil, toErr(ctx, err) } @@ -123,13 +147,12 @@ } func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusResponse, error) { - conn, err := m.c.Dial(endpoint) + remote, cancel, err := m.dial(endpoint) if err != nil { return nil, toErr(ctx, err) } - defer conn.Close() - remote := pb.NewMaintenanceClient(conn) - resp, err := remote.Status(ctx, &pb.StatusRequest{}, grpc.FailFast(false)) + defer cancel() + resp, err := remote.Status(ctx, &pb.StatusRequest{}, m.callOpts...) if err != nil { return nil, toErr(ctx, err) } @@ -137,7 +160,7 @@ } func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) { - ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, grpc.FailFast(false)) + ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, m.callOpts...) if err != nil { return nil, toErr(ctx, err) } diff --git a/vendor/github.com/coreos/etcd/clientv3/op.go b/vendor/github.com/coreos/etcd/clientv3/op.go index 6e2600766988..e18d28662c46 100644 --- a/vendor/github.com/coreos/etcd/clientv3/op.go +++ b/vendor/github.com/coreos/etcd/clientv3/op.go @@ -23,6 +23,7 @@ const ( tRange opType = iota + 1 tPut tDeleteRange + tTxn ) var ( @@ -52,6 +53,10 @@ type Op struct { // for watch, put, delete prevKV bool + // for put + ignoreValue bool + ignoreLease bool + // progressNotify is for progress updates. progressNotify bool // createdNotify is for created event @@ -63,8 +68,69 @@ // for put val []byte leaseID LeaseID + + // txn + cmps []Cmp + thenOps []Op + elseOps []Op } +// accessors / mutators + +func (op Op) IsTxn() bool { return op.t == tTxn } +func (op Op) Txn() ([]Cmp, []Op, []Op) { return op.cmps, op.thenOps, op.elseOps } + +// KeyBytes returns the byte slice holding the Op's key. +func (op Op) KeyBytes() []byte { return op.key } + +// WithKeyBytes sets the byte slice for the Op's key. +func (op *Op) WithKeyBytes(key []byte) { op.key = key } + +// RangeBytes returns the byte slice holding the Op's range end, if any. +func (op Op) RangeBytes() []byte { return op.end } + +// Rev returns the requested revision, if any. +func (op Op) Rev() int64 { return op.rev } + +// IsPut returns true iff the operation is a Put. +func (op Op) IsPut() bool { return op.t == tPut } + +// IsGet returns true iff the operation is a Get. +func (op Op) IsGet() bool { return op.t == tRange } + +// IsDelete returns true iff the operation is a Delete. +func (op Op) IsDelete() bool { return op.t == tDeleteRange } + +// IsSerializable returns true if the serializable field is true. +func (op Op) IsSerializable() bool { return op.serializable == true } + +// IsKeysOnly returns whether keysOnly is set.
+func (op Op) IsKeysOnly() bool { return op.keysOnly == true } + +// IsCountOnly returns whether countOnly is set. +func (op Op) IsCountOnly() bool { return op.countOnly == true } + +// MinModRev returns the operation's minimum modify revision. +func (op Op) MinModRev() int64 { return op.minModRev } + +// MaxModRev returns the operation's maximum modify revision. +func (op Op) MaxModRev() int64 { return op.maxModRev } + +// MinCreateRev returns the operation's minimum create revision. +func (op Op) MinCreateRev() int64 { return op.minCreateRev } + +// MaxCreateRev returns the operation's maximum create revision. +func (op Op) MaxCreateRev() int64 { return op.maxCreateRev } + +// WithRangeBytes sets the byte slice for the Op's range end. +func (op *Op) WithRangeBytes(end []byte) { op.end = end } + +// ValueBytes returns the byte slice holding the Op's value, if any. +func (op Op) ValueBytes() []byte { return op.val } + +// WithValueBytes sets the byte slice for the Op's value. +func (op *Op) WithValueBytes(v []byte) { op.val = v } + func (op Op) toRangeRequest() *pb.RangeRequest { if op.t != tRange { panic("op.t != tRange") @@ -89,12 +155,28 @@ func (op Op) toRangeRequest() *pb.RangeRequest { return r } +func (op Op) toTxnRequest() *pb.TxnRequest { + thenOps := make([]*pb.RequestOp, len(op.thenOps)) + for i, tOp := range op.thenOps { + thenOps[i] = tOp.toRequestOp() + } + elseOps := make([]*pb.RequestOp, len(op.elseOps)) + for i, eOp := range op.elseOps { + elseOps[i] = eOp.toRequestOp() + } + cmps := make([]*pb.Compare, len(op.cmps)) + for i := range op.cmps { + cmps[i] = (*pb.Compare)(&op.cmps[i]) + } + return &pb.TxnRequest{Compare: cmps, Success: thenOps, Failure: elseOps} +} + func (op Op) toRequestOp() *pb.RequestOp { switch op.t { case tRange: return &pb.RequestOp{Request: &pb.RequestOp_RequestRange{RequestRange: op.toRangeRequest()}} case tPut: - r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV} + r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease} return &pb.RequestOp{Request: &pb.RequestOp_RequestPut{RequestPut: r}} case tDeleteRange: r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV} @@ -105,6 +187,19 @@ func (op Op) toRequestOp() *pb.RequestOp { } func (op Op) isWrite() bool { + if op.t == tTxn { + for _, tOp := range op.thenOps { + if tOp.isWrite() { + return true + } + } + for _, tOp := range op.elseOps { + if tOp.isWrite() { + return true + } + } + return false + } return op.t != tRange } @@ -170,6 +265,10 @@ func OpPut(key, val string, opts ...OpOption) Op { return ret } +func OpTxn(cmps []Cmp, thenOps []Op, elseOps []Op) Op { + return Op{t: tTxn, cmps: cmps, thenOps: thenOps, elseOps: elseOps} +} + func opWatch(key string, opts ...OpOption) Op { ret := Op{t: tRange, key: []byte(key)} ret.applyOpts(opts) @@ -207,6 +306,7 @@ func WithLease(leaseID LeaseID) OpOption { } // WithLimit limits the number of results to return from 'Get' request. +// If WithLimit is given a 0 limit, it is treated as no limit. func WithLimit(n int64) OpOption { return func(op *Op) { op.limit = n } } // WithRev specifies the store revision for 'Get' request. @@ -222,9 +322,9 @@ func WithSort(target SortTarget, order SortOrder) OpOption { if target == SortByKey && order == SortAscend { // If order != SortNone, server fetches the entire key-space, // and then applies the sort and limit, if provided. 
- // Since current mvcc.Range implementation returns results - // sorted by keys in lexicographically ascending order, - // client should ignore SortOrder if the target is SortByKey. + // Since by default the server returns results sorted by keys + // in lexicographically ascending order, the client should ignore + // SortOrder if the target is SortByKey. order = SortNone } op.sort = &SortOption{target, order} @@ -257,6 +357,10 @@ func getPrefix(key []byte) []byte { // can return 'foo1', 'foo2', and so on. func WithPrefix() OpOption { return func(op *Op) { + if len(op.key) == 0 { + op.key, op.end = []byte{0}, []byte{0} + return + } op.end = getPrefix(op.key) } } @@ -360,6 +464,24 @@ func WithPrevKV() OpOption { } } +// WithIgnoreValue updates the key using its current value. +// This option can not be combined with non-empty values. +// Returns an error if the key does not exist. +func WithIgnoreValue() OpOption { + return func(op *Op) { + op.ignoreValue = true + } +} + +// WithIgnoreLease updates the key using its current lease. +// This option can not be combined with WithLease. +// Returns an error if the key does not exist. +func WithIgnoreLease() OpOption { + return func(op *Op) { + op.ignoreLease = true + } +} + // LeaseOp represents an Operation that lease can execute. type LeaseOp struct { id LeaseID @@ -377,8 +499,7 @@ func (op *LeaseOp) applyOpts(opts []LeaseOption) { } } -// WithAttachedKeys requests lease timetolive API to return -// attached keys of given lease ID. +// WithAttachedKeys makes TimeToLive list the keys attached to the given lease ID. func WithAttachedKeys() LeaseOption { return func(op *LeaseOp) { op.attachedKeys = true } } diff --git a/vendor/github.com/coreos/etcd/clientv3/ready_wait.go b/vendor/github.com/coreos/etcd/clientv3/ready_wait.go new file mode 100644 index 000000000000..23eea9367ff7 --- /dev/null +++ b/vendor/github.com/coreos/etcd/clientv3/ready_wait.go @@ -0,0 +1,30 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package clientv3 + +import "golang.org/x/net/context" + +// TODO: remove this when "FailFast=false" is fixed. +// See https://github.com/grpc/grpc-go/issues/1532. 
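+// readyWait returns nil once ready closes (the balancer has pinned an endpoint) +// and otherwise surfaces whichever of rpcCtx or clientCtx is canceled first; +// e.g. the retry wrapper gates each attempt with +// readyWait(rpcCtx, c.ctx, c.balancer.ConnectNotify()).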
+func readyWait(rpcCtx, clientCtx context.Context, ready <-chan struct{}) error { + select { + case <-ready: + return nil + case <-rpcCtx.Done(): + return rpcCtx.Err() + case <-clientCtx.Done(): + return clientCtx.Err() + } +} diff --git a/vendor/github.com/coreos/etcd/clientv3/retry.go b/vendor/github.com/coreos/etcd/clientv3/retry.go index 78f31a8c4b0c..c95b2cad7c4f 100644 --- a/vendor/github.com/coreos/etcd/clientv3/retry.go +++ b/vendor/github.com/coreos/etcd/clientv3/retry.go @@ -17,135 +17,183 @@ package clientv3 import ( "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "golang.org/x/net/context" "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" ) type rpcFunc func(ctx context.Context) error -type retryRpcFunc func(context.Context, rpcFunc) error +type retryRPCFunc func(context.Context, rpcFunc) error +type retryStopErrFunc func(error) bool + +func isRepeatableStopError(err error) bool { + eErr := rpctypes.Error(err) + // always stop retry on etcd errors + if serverErr, ok := eErr.(rpctypes.EtcdError); ok && serverErr.Code() != codes.Unavailable { + return true + } + // only retry if unavailable + ev, _ := status.FromError(err) + return ev.Code() != codes.Unavailable +} + +func isNonRepeatableStopError(err error) bool { + ev, _ := status.FromError(err) + if ev.Code() != codes.Unavailable { + return true + } + desc := rpctypes.ErrorDesc(err) + return desc != "there is no address available" && desc != "there is no connection available" +} -func (c *Client) newRetryWrapper() retryRpcFunc { +func (c *Client) newRetryWrapper(isStop retryStopErrFunc) retryRPCFunc { return func(rpcCtx context.Context, f rpcFunc) error { for { + if err := readyWait(rpcCtx, c.ctx, c.balancer.ConnectNotify()); err != nil { + return err + } + pinned := c.balancer.pinned() err := f(rpcCtx) if err == nil { return nil } - - eErr := rpctypes.Error(err) - // always stop retry on etcd errors - if _, ok := eErr.(rpctypes.EtcdError); ok { - return err + if logger.V(4) { + logger.Infof("clientv3/retry: error %q on pinned endpoint %q", err.Error(), pinned) } - // only retry if unavailable - if grpc.Code(err) != codes.Unavailable { - return err + if s, ok := status.FromError(err); ok && (s.Code() == codes.Unavailable || s.Code() == codes.DeadlineExceeded || s.Code() == codes.Internal) { + // mark this before endpoint switch is triggered + c.balancer.hostPortError(pinned, err) + c.balancer.next() + if logger.V(4) { + logger.Infof("clientv3/retry: switching from %q due to error %q", pinned, err.Error()) + } } - select { - case <-c.balancer.ConnectNotify(): - case <-rpcCtx.Done(): - return rpcCtx.Err() - case <-c.ctx.Done(): - return c.ctx.Err() + if isStop(err) { + return err } } } } -func (c *Client) newAuthRetryWrapper() retryRpcFunc { +func (c *Client) newAuthRetryWrapper() retryRPCFunc { return func(rpcCtx context.Context, f rpcFunc) error { for { + pinned := c.balancer.pinned() err := f(rpcCtx) if err == nil { return nil } - + if logger.V(4) { + logger.Infof("clientv3/auth-retry: error %q on pinned endpoint %q", err.Error(), pinned) + } // always stop retry on etcd errors other than invalid auth token if rpctypes.Error(err) == rpctypes.ErrInvalidAuthToken { gterr := c.getToken(rpcCtx) if gterr != nil { + if logger.V(4) { + logger.Infof("clientv3/auth-retry: cannot retry due to error %q(%q) on pinned endpoint %q", err.Error(), gterr.Error(), pinned) + } return err // return the original error for simplicity } continue } - 
return err } } } -// RetryKVClient implements a KVClient that uses the client's FailFast retry policy. +// RetryKVClient implements a KVClient. func RetryKVClient(c *Client) pb.KVClient { - retryWrite := &retryWriteKVClient{pb.NewKVClient(c.conn), c.retryWrapper} - return &retryKVClient{&retryWriteKVClient{retryWrite, c.retryAuthWrapper}} + repeatableRetry := c.newRetryWrapper(isRepeatableStopError) + nonRepeatableRetry := c.newRetryWrapper(isNonRepeatableStopError) + conn := pb.NewKVClient(c.conn) + retryBasic := &retryKVClient{&nonRepeatableKVClient{conn, nonRepeatableRetry}, repeatableRetry} + retryAuthWrapper := c.newAuthRetryWrapper() + return &retryKVClient{ + &nonRepeatableKVClient{retryBasic, retryAuthWrapper}, + retryAuthWrapper} } type retryKVClient struct { - *retryWriteKVClient + *nonRepeatableKVClient + repeatableRetry retryRPCFunc } func (rkv *retryKVClient) Range(ctx context.Context, in *pb.RangeRequest, opts ...grpc.CallOption) (resp *pb.RangeResponse, err error) { - err = rkv.retryf(ctx, func(rctx context.Context) error { - resp, err = rkv.retryWriteKVClient.Range(rctx, in, opts...) + err = rkv.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rkv.kc.Range(rctx, in, opts...) return err }) return resp, err } -type retryWriteKVClient struct { - pb.KVClient - retryf retryRpcFunc +type nonRepeatableKVClient struct { + kc pb.KVClient + nonRepeatableRetry retryRPCFunc } -func (rkv *retryWriteKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) { - err = rkv.retryf(ctx, func(rctx context.Context) error { - resp, err = rkv.KVClient.Put(rctx, in, opts...) +func (rkv *nonRepeatableKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) { + err = rkv.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rkv.kc.Put(rctx, in, opts...) return err }) return resp, err } -func (rkv *retryWriteKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) { - err = rkv.retryf(ctx, func(rctx context.Context) error { - resp, err = rkv.KVClient.DeleteRange(rctx, in, opts...) +func (rkv *nonRepeatableKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) { + err = rkv.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rkv.kc.DeleteRange(rctx, in, opts...) return err }) return resp, err } -func (rkv *retryWriteKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) { - err = rkv.retryf(ctx, func(rctx context.Context) error { - resp, err = rkv.KVClient.Txn(rctx, in, opts...) +func (rkv *nonRepeatableKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) { + // TODO: repeatableRetry if read-only txn + err = rkv.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rkv.kc.Txn(rctx, in, opts...) return err }) return resp, err } -func (rkv *retryWriteKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) { - err = rkv.retryf(ctx, func(rctx context.Context) error { - resp, err = rkv.KVClient.Compact(rctx, in, opts...) 
+func (rkv *nonRepeatableKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) { + err = rkv.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rkv.kc.Compact(rctx, in, opts...) return err }) return resp, err } type retryLeaseClient struct { - pb.LeaseClient - retryf retryRpcFunc + lc pb.LeaseClient + repeatableRetry retryRPCFunc } -// RetryLeaseClient implements a LeaseClient that uses the client's FailFast retry policy. +// RetryLeaseClient implements a LeaseClient. func RetryLeaseClient(c *Client) pb.LeaseClient { - retry := &retryLeaseClient{pb.NewLeaseClient(c.conn), c.retryWrapper} - return &retryLeaseClient{retry, c.retryAuthWrapper} + retry := &retryLeaseClient{ + pb.NewLeaseClient(c.conn), + c.newRetryWrapper(isRepeatableStopError), + } + return &retryLeaseClient{retry, c.newAuthRetryWrapper()} +} + +func (rlc *retryLeaseClient) LeaseTimeToLive(ctx context.Context, in *pb.LeaseTimeToLiveRequest, opts ...grpc.CallOption) (resp *pb.LeaseTimeToLiveResponse, err error) { + err = rlc.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rlc.lc.LeaseTimeToLive(rctx, in, opts...) + return err + }) + return resp, err } func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (resp *pb.LeaseGrantResponse, err error) { - err = rlc.retryf(ctx, func(rctx context.Context) error { - resp, err = rlc.LeaseClient.LeaseGrant(rctx, in, opts...) + err = rlc.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rlc.lc.LeaseGrant(rctx, in, opts...) return err }) return resp, err @@ -153,140 +201,270 @@ func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRe } func (rlc *retryLeaseClient) LeaseRevoke(ctx context.Context, in *pb.LeaseRevokeRequest, opts ...grpc.CallOption) (resp *pb.LeaseRevokeResponse, err error) { - err = rlc.retryf(ctx, func(rctx context.Context) error { - resp, err = rlc.LeaseClient.LeaseRevoke(rctx, in, opts...) + err = rlc.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rlc.lc.LeaseRevoke(rctx, in, opts...) return err }) return resp, err } +func (rlc *retryLeaseClient) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (stream pb.Lease_LeaseKeepAliveClient, err error) { + err = rlc.repeatableRetry(ctx, func(rctx context.Context) error { + stream, err = rlc.lc.LeaseKeepAlive(rctx, opts...) + return err + }) + return stream, err +} + type retryClusterClient struct { - pb.ClusterClient - retryf retryRpcFunc + *nonRepeatableClusterClient + repeatableRetry retryRPCFunc } -// RetryClusterClient implements a ClusterClient that uses the client's FailFast retry policy. +// RetryClusterClient implements a ClusterClient. func RetryClusterClient(c *Client) pb.ClusterClient { - return &retryClusterClient{pb.NewClusterClient(c.conn), c.retryWrapper} + repeatableRetry := c.newRetryWrapper(isRepeatableStopError) + nonRepeatableRetry := c.newRetryWrapper(isNonRepeatableStopError) + cc := pb.NewClusterClient(c.conn) + return &retryClusterClient{&nonRepeatableClusterClient{cc, nonRepeatableRetry}, repeatableRetry} +} + +func (rcc *retryClusterClient) MemberList(ctx context.Context, in *pb.MemberListRequest, opts ...grpc.CallOption) (resp *pb.MemberListResponse, err error) { + err = rcc.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rcc.cc.MemberList(rctx, in, opts...) 
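+ // MemberList is read-only, hence repeatable: it is safe to re-send the + // request to another member if the pinned endpoint becomes unavailable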
+ return err + }) + return resp, err +} + +type nonRepeatableClusterClient struct { + cc pb.ClusterClient + nonRepeatableRetry retryRPCFunc +} + +func (rcc *nonRepeatableClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) { + err = rcc.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rcc.cc.MemberAdd(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rcc *nonRepeatableClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRemoveRequest, opts ...grpc.CallOption) (resp *pb.MemberRemoveResponse, err error) { + err = rcc.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rcc.cc.MemberRemove(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rcc *nonRepeatableClusterClient) MemberUpdate(ctx context.Context, in *pb.MemberUpdateRequest, opts ...grpc.CallOption) (resp *pb.MemberUpdateResponse, err error) { + err = rcc.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rcc.cc.MemberUpdate(rctx, in, opts...) + return err + }) + return resp, err +} + +// RetryMaintenanceClient implements a Maintenance. +func RetryMaintenanceClient(c *Client, conn *grpc.ClientConn) pb.MaintenanceClient { + repeatableRetry := c.newRetryWrapper(isRepeatableStopError) + nonRepeatableRetry := c.newRetryWrapper(isNonRepeatableStopError) + mc := pb.NewMaintenanceClient(conn) + return &retryMaintenanceClient{&nonRepeatableMaintenanceClient{mc, nonRepeatableRetry}, repeatableRetry} +} + +type retryMaintenanceClient struct { + *nonRepeatableMaintenanceClient + repeatableRetry retryRPCFunc } -func (rcc *retryClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) { - err = rcc.retryf(ctx, func(rctx context.Context) error { - resp, err = rcc.ClusterClient.MemberAdd(rctx, in, opts...) +func (rmc *retryMaintenanceClient) Alarm(ctx context.Context, in *pb.AlarmRequest, opts ...grpc.CallOption) (resp *pb.AlarmResponse, err error) { + err = rmc.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rmc.mc.Alarm(rctx, in, opts...) return err }) return resp, err } -func (rcc *retryClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRemoveRequest, opts ...grpc.CallOption) (resp *pb.MemberRemoveResponse, err error) { - err = rcc.retryf(ctx, func(rctx context.Context) error { - resp, err = rcc.ClusterClient.MemberRemove(rctx, in, opts...) +func (rmc *retryMaintenanceClient) Status(ctx context.Context, in *pb.StatusRequest, opts ...grpc.CallOption) (resp *pb.StatusResponse, err error) { + err = rmc.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rmc.mc.Status(rctx, in, opts...) return err }) return resp, err } -func (rcc *retryClusterClient) MemberUpdate(ctx context.Context, in *pb.MemberUpdateRequest, opts ...grpc.CallOption) (resp *pb.MemberUpdateResponse, err error) { - err = rcc.retryf(ctx, func(rctx context.Context) error { - resp, err = rcc.ClusterClient.MemberUpdate(rctx, in, opts...) +func (rmc *retryMaintenanceClient) Hash(ctx context.Context, in *pb.HashRequest, opts ...grpc.CallOption) (resp *pb.HashResponse, err error) { + err = rmc.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rmc.mc.Hash(rctx, in, opts...) 
+ return err + }) + return resp, err +} + +func (rmc *retryMaintenanceClient) Snapshot(ctx context.Context, in *pb.SnapshotRequest, opts ...grpc.CallOption) (stream pb.Maintenance_SnapshotClient, err error) { + err = rmc.repeatableRetry(ctx, func(rctx context.Context) error { + stream, err = rmc.mc.Snapshot(rctx, in, opts...) + return err + }) + return stream, err +} + +type nonRepeatableMaintenanceClient struct { + mc pb.MaintenanceClient + nonRepeatableRetry retryRPCFunc +} + +func (rmc *nonRepeatableMaintenanceClient) Defragment(ctx context.Context, in *pb.DefragmentRequest, opts ...grpc.CallOption) (resp *pb.DefragmentResponse, err error) { + err = rmc.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rmc.mc.Defragment(rctx, in, opts...) return err }) return resp, err } type retryAuthClient struct { - pb.AuthClient - retryf retryRpcFunc + *nonRepeatableAuthClient + repeatableRetry retryRPCFunc } -// RetryAuthClient implements a AuthClient that uses the client's FailFast retry policy. +// RetryAuthClient implements a AuthClient. func RetryAuthClient(c *Client) pb.AuthClient { - return &retryAuthClient{pb.NewAuthClient(c.conn), c.retryWrapper} + repeatableRetry := c.newRetryWrapper(isRepeatableStopError) + nonRepeatableRetry := c.newRetryWrapper(isNonRepeatableStopError) + ac := pb.NewAuthClient(c.conn) + return &retryAuthClient{&nonRepeatableAuthClient{ac, nonRepeatableRetry}, repeatableRetry} +} + +func (rac *retryAuthClient) UserList(ctx context.Context, in *pb.AuthUserListRequest, opts ...grpc.CallOption) (resp *pb.AuthUserListResponse, err error) { + err = rac.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.UserList(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rac *retryAuthClient) UserGet(ctx context.Context, in *pb.AuthUserGetRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGetResponse, err error) { + err = rac.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.UserGet(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rac *retryAuthClient) RoleGet(ctx context.Context, in *pb.AuthRoleGetRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGetResponse, err error) { + err = rac.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.RoleGet(rctx, in, opts...) + return err + }) + return resp, err +} + +func (rac *retryAuthClient) RoleList(ctx context.Context, in *pb.AuthRoleListRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleListResponse, err error) { + err = rac.repeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.RoleList(rctx, in, opts...) + return err + }) + return resp, err +} + +type nonRepeatableAuthClient struct { + ac pb.AuthClient + nonRepeatableRetry retryRPCFunc +} + +func (rac *nonRepeatableAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.AuthEnable(rctx, in, opts...) + return err + }) + return resp, err } -func (rac *retryAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.AuthEnable(rctx, in, opts...) 
+func (rac *nonRepeatableAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (resp *pb.AuthDisableResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.AuthDisable(rctx, in, opts...) return err }) return resp, err } -func (rac *retryAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (resp *pb.AuthDisableResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.AuthDisable(rctx, in, opts...) +func (rac *nonRepeatableAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (resp *pb.AuthUserAddResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.UserAdd(rctx, in, opts...) return err }) return resp, err } -func (rac *retryAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (resp *pb.AuthUserAddResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.UserAdd(rctx, in, opts...) +func (rac *nonRepeatableAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthUserDeleteResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.UserDelete(rctx, in, opts...) return err }) return resp, err } -func (rac *retryAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthUserDeleteResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.UserDelete(rctx, in, opts...) +func (rac *nonRepeatableAuthClient) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (resp *pb.AuthUserChangePasswordResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.UserChangePassword(rctx, in, opts...) return err }) return resp, err } -func (rac *retryAuthClient) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (resp *pb.AuthUserChangePasswordResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.UserChangePassword(rctx, in, opts...) +func (rac *nonRepeatableAuthClient) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGrantRoleResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.UserGrantRole(rctx, in, opts...) return err }) return resp, err } -func (rac *retryAuthClient) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGrantRoleResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.UserGrantRole(rctx, in, opts...) +func (rac *nonRepeatableAuthClient) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserRevokeRoleResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.UserRevokeRole(rctx, in, opts...) 
return err }) return resp, err } -func (rac *retryAuthClient) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserRevokeRoleResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.UserRevokeRole(rctx, in, opts...) +func (rac *nonRepeatableAuthClient) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleAddResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.RoleAdd(rctx, in, opts...) return err }) return resp, err } -func (rac *retryAuthClient) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleAddResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.RoleAdd(rctx, in, opts...) +func (rac *nonRepeatableAuthClient) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleDeleteResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.RoleDelete(rctx, in, opts...) return err }) return resp, err } -func (rac *retryAuthClient) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleDeleteResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.RoleDelete(rctx, in, opts...) +func (rac *nonRepeatableAuthClient) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGrantPermissionResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.RoleGrantPermission(rctx, in, opts...) return err }) return resp, err } -func (rac *retryAuthClient) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGrantPermissionResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.RoleGrantPermission(rctx, in, opts...) +func (rac *nonRepeatableAuthClient) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleRevokePermissionResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.RoleRevokePermission(rctx, in, opts...) return err }) return resp, err } -func (rac *retryAuthClient) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleRevokePermissionResponse, err error) { - err = rac.retryf(ctx, func(rctx context.Context) error { - resp, err = rac.AuthClient.RoleRevokePermission(rctx, in, opts...) +func (rac *nonRepeatableAuthClient) Authenticate(ctx context.Context, in *pb.AuthenticateRequest, opts ...grpc.CallOption) (resp *pb.AuthenticateResponse, err error) { + err = rac.nonRepeatableRetry(ctx, func(rctx context.Context) error { + resp, err = rac.ac.Authenticate(rctx, in, opts...) 
return err }) return resp, err diff --git a/vendor/github.com/coreos/etcd/clientv3/txn.go b/vendor/github.com/coreos/etcd/clientv3/txn.go index a61decd6406d..1a80c8ebaabe 100644 --- a/vendor/github.com/coreos/etcd/clientv3/txn.go +++ b/vendor/github.com/coreos/etcd/clientv3/txn.go @@ -18,13 +18,14 @@ import ( "sync" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "golang.org/x/net/context" "google.golang.org/grpc" ) // Txn is the interface that wraps mini-transactions. // -// Tx.If( +// Txn(context.TODO()).If( // Compare(Value(k1), ">", v1), // Compare(Version(k1), "=", 2) // ).Then( @@ -49,8 +50,6 @@ type Txn interface { // Commit tries to commit the transaction. Commit() (*TxnResponse, error) - - // TODO: add a Do for shortcut the txn without any condition? } type txn struct { @@ -68,6 +67,8 @@ type txn struct { sus []*pb.RequestOp fas []*pb.RequestOp + + callOpts []grpc.CallOption } func (txn *txn) If(cs ...Cmp) Txn { @@ -137,30 +138,14 @@ func (txn *txn) Else(ops ...Op) Txn { func (txn *txn) Commit() (*TxnResponse, error) { txn.mu.Lock() defer txn.mu.Unlock() - for { - resp, err := txn.commit() - if err == nil { - return resp, err - } - if isHaltErr(txn.ctx, err) { - return nil, toErr(txn.ctx, err) - } - if txn.isWrite { - return nil, toErr(txn.ctx, err) - } - } -} -func (txn *txn) commit() (*TxnResponse, error) { r := &pb.TxnRequest{Compare: txn.cmps, Success: txn.sus, Failure: txn.fas} - var opts []grpc.CallOption - if !txn.isWrite { - opts = []grpc.CallOption{grpc.FailFast(false)} - } - resp, err := txn.kv.remote.Txn(txn.ctx, r, opts...) + var resp *pb.TxnResponse + var err error + resp, err = txn.kv.remote.Txn(txn.ctx, r, txn.callOpts...) if err != nil { - return nil, err + return nil, toErr(txn.ctx, err) } return (*TxnResponse)(resp), nil } diff --git a/vendor/github.com/coreos/etcd/clientv3/watch.go b/vendor/github.com/coreos/etcd/clientv3/watch.go index 9b083cc94627..16a91fdff404 100644 --- a/vendor/github.com/coreos/etcd/clientv3/watch.go +++ b/vendor/github.com/coreos/etcd/clientv3/watch.go @@ -22,8 +22,12 @@ import ( v3rpc "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" mvccpb "github.com/coreos/etcd/mvcc/mvccpb" + "golang.org/x/net/context" "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" ) const ( @@ -39,10 +43,9 @@ type WatchChan <-chan WatchResponse type Watcher interface { // Watch watches on a key or prefix. The watched events will be returned - // through the returned channel. - // If the watch is slow or the required rev is compacted, the watch request - // might be canceled from the server-side and the chan will be closed. - // 'opts' can be: 'WithRev' and/or 'WithPrefix'. + // through the returned channel. If revisions waiting to be sent over the + // watch are compacted, then the watch will be canceled by the server, the + // client will post a compacted error watch response, and the channel will close. Watch(ctx context.Context, key string, opts ...OpOption) WatchChan // Close closes the watcher and cancels all watch requests. @@ -65,6 +68,9 @@ type WatchResponse struct { Created bool closeErr error + + // cancelReason is a reason of canceling watch + cancelReason string } // IsCreate returns true if the event tells that the key is newly created. 
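The retry.go rewrite above splits the clientv3 RPC wrappers in two: repeatable calls (reads such as MemberList, UserGet, or RoleList) go through repeatableRetry, while mutations (MemberAdd, UserAdd, Defragment, and the like) go through nonRepeatableRetry and must stop once a request may have reached the server. A minimal sketch of that wrapper shape follows; newRetryWrapper and errUnavailable are made-up names for illustration, not the vendored implementation.

package main

import (
	"context"
	"errors"
	"fmt"
)

// retryRPCFunc matches the signature used by the wrappers above.
type retryRPCFunc func(context.Context, func(context.Context) error) error

var errUnavailable = errors.New("etcdserver: unavailable") // assumed transient error

// newRetryWrapper keeps re-invoking f while isRetriable says the failure is
// safe to retry and the context is still alive.
func newRetryWrapper(isRetriable func(error) bool) retryRPCFunc {
	return func(ctx context.Context, f func(context.Context) error) error {
		for {
			if err := f(ctx); err == nil || !isRetriable(err) {
				return err
			}
			select {
			case <-ctx.Done():
				return ctx.Err()
			default: // transient failure; issue the RPC again
			}
		}
	}
}

func main() {
	attempts := 0
	repeatableRetry := newRetryWrapper(func(err error) bool { return err == errUnavailable })
	err := repeatableRetry(context.Background(), func(context.Context) error {
		attempts++
		if attempts < 3 {
			return errUnavailable // a read is safe to reissue
		}
		return nil
	})
	fmt.Println(attempts, err) // prints: 3 <nil>
}

The vendored wrappers do more (they classify gRPC status codes and refresh auth tokens), but the repeatable/non-repeatable distinction is exactly this: only the stop-error predicate handed to the wrapper changes.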
@@ -85,6 +91,9 @@ func (wr *WatchResponse) Err() error { case wr.CompactRevision != 0: return v3rpc.ErrCompacted case wr.Canceled: + if len(wr.cancelReason) != 0 { + return v3rpc.Error(status.Error(codes.FailedPrecondition, wr.cancelReason)) + } return v3rpc.ErrFutureRev } return nil @@ -97,7 +106,8 @@ func (wr *WatchResponse) IsProgressNotify() bool { // watcher implements the Watcher interface type watcher struct { - remote pb.WatchClient + remote pb.WatchClient + callOpts []grpc.CallOption // mu protects the grpc streams map mu sync.RWMutex @@ -108,8 +118,9 @@ type watcher struct { // watchGrpcStream tracks all watch resources attached to a single grpc stream. type watchGrpcStream struct { - owner *watcher - remote pb.WatchClient + owner *watcher + remote pb.WatchClient + callOpts []grpc.CallOption // ctx controls internal remote.Watch requests ctx context.Context @@ -128,7 +139,7 @@ type watchGrpcStream struct { respc chan *pb.WatchResponse // donec closes to broadcast shutdown donec chan struct{} - // errc transmits errors from grpc Recv to the watch stream reconn logic + // errc transmits errors from grpc Recv to the watch stream reconnect logic errc chan error // closingc gets the watcherStream of closing watchers closingc chan *watcherStream @@ -180,14 +191,18 @@ type watcherStream struct { } func NewWatcher(c *Client) Watcher { - return NewWatchFromWatchClient(pb.NewWatchClient(c.conn)) + return NewWatchFromWatchClient(pb.NewWatchClient(c.conn), c) } -func NewWatchFromWatchClient(wc pb.WatchClient) Watcher { - return &watcher{ +func NewWatchFromWatchClient(wc pb.WatchClient, c *Client) Watcher { + w := &watcher{ remote: wc, streams: make(map[string]*watchGrpcStream), } + if c != nil { + w.callOpts = c.callOpts + } + return w } // never closes @@ -206,17 +221,17 @@ func (w *watcher) newWatcherGrpcStream(inctx context.Context) *watchGrpcStream { wgs := &watchGrpcStream{ owner: w, remote: w.remote, + callOpts: w.callOpts, ctx: ctx, - ctxKey: fmt.Sprintf("%v", inctx), + ctxKey: streamKeyFromCtx(inctx), cancel: cancel, substreams: make(map[int64]*watcherStream), - - respc: make(chan *pb.WatchResponse), - reqc: make(chan *watchRequest), - donec: make(chan struct{}), - errc: make(chan error, 1), - closingc: make(chan *watcherStream), - resumec: make(chan struct{}), + respc: make(chan *pb.WatchResponse), + reqc: make(chan *watchRequest), + donec: make(chan struct{}), + errc: make(chan error, 1), + closingc: make(chan *watcherStream), + resumec: make(chan struct{}), } go wgs.run() return wgs @@ -247,7 +262,7 @@ func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) Watch } ok := false - ctxKey := fmt.Sprintf("%v", ctx) + ctxKey := streamKeyFromCtx(ctx) // find or allocate appropriate grpc watch stream w.mu.Lock() @@ -310,14 +325,14 @@ func (w *watcher) Close() (err error) { w.streams = nil w.mu.Unlock() for _, wgs := range streams { - if werr := wgs.Close(); werr != nil { + if werr := wgs.close(); werr != nil { err = werr } } return err } -func (w *watchGrpcStream) Close() (err error) { +func (w *watchGrpcStream) close() (err error) { w.cancel() <-w.donec select { @@ -428,7 +443,7 @@ func (w *watchGrpcStream) run() { initReq: *wreq, id: -1, outc: outc, - // unbufffered so resumes won't cause repeat events + // unbuffered so resumes won't cause repeat events recvc: make(chan *WatchResponse), } @@ -480,7 +495,7 @@ func (w *watchGrpcStream) run() { req := &pb.WatchRequest{RequestUnion: cr} wc.Send(req) } - // watch client failed to recv; spawn another if possible + // 
watch client failed on Recv; spawn another if possible case err := <-w.errc: if isHaltErr(w.ctx, err) || toErr(w.ctx, err) == v3rpc.ErrNoLeader { closeErr = err @@ -520,10 +535,6 @@ func (w *watchGrpcStream) nextResume() *watcherStream { // dispatchEvent sends a WatchResponse to the appropriate watcher stream func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool { - ws, ok := w.substreams[pbresp.WatchId] - if !ok { - return false - } events := make([]*Event, len(pbresp.Events)) for i, ev := range pbresp.Events { events[i] = (*Event)(ev) @@ -534,6 +545,11 @@ func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool { CompactRevision: pbresp.CompactRevision, Created: pbresp.Created, Canceled: pbresp.Canceled, + cancelReason: pbresp.CancelReason, + } + ws, ok := w.substreams[pbresp.WatchId] + if !ok { + return false } select { case ws.recvc <- wr: @@ -725,7 +741,11 @@ func (w *watchGrpcStream) waitCancelSubstreams(stopc <-chan struct{}) <-chan str ws.closing = true close(ws.outc) ws.outc = nil - go func() { w.closingc <- ws }() + w.wg.Add(1) + go func() { + defer w.wg.Done() + w.closingc <- ws + }() case <-stopc: } }(w.resuming[i]) @@ -737,7 +757,7 @@ func (w *watchGrpcStream) waitCancelSubstreams(stopc <-chan struct{}) <-chan str return donec } -// joinSubstream waits for all substream goroutines to complete +// joinSubstreams waits for all substream goroutines to complete. func (w *watchGrpcStream) joinSubstreams() { for _, ws := range w.substreams { <-ws.donec @@ -749,7 +769,9 @@ func (w *watchGrpcStream) joinSubstreams() { } } -// openWatchClient retries opening a watchclient until retryConnection fails +// openWatchClient retries opening a watch client until success or halt. +// manually retry in case "ws==nil && err==nil" +// TODO: remove FailFast=false func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) { for { select { @@ -760,7 +782,7 @@ func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) return nil, err default: } - if ws, err = w.remote.Watch(w.ctx, grpc.FailFast(false)); ws != nil && err == nil { + if ws, err = w.remote.Watch(w.ctx, w.callOpts...); ws != nil && err == nil { break } if isHaltErr(w.ctx, err) { @@ -770,7 +792,7 @@ func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) return ws, nil } -// toPB converts an internal watch request structure to its protobuf messagefunc (wr *watchRequest) +// toPB converts an internal watch request structure to its protobuf WatchRequest structure. 
func (wr *watchRequest) toPB() *pb.WatchRequest { req := &pb.WatchCreateRequest{ StartRevision: wr.rev, @@ -783,3 +805,10 @@ func (wr *watchRequest) toPB() *pb.WatchRequest { cr := &pb.WatchRequest_CreateRequest{CreateRequest: req} return &pb.WatchRequest{RequestUnion: cr} } + +func streamKeyFromCtx(ctx context.Context) string { + if md, ok := metadata.FromOutgoingContext(ctx); ok { + return fmt.Sprintf("%+v", md) + } + return "" +} diff --git a/vendor/github.com/coreos/etcd/compactor/compactor.go b/vendor/github.com/coreos/etcd/compactor/compactor.go index 322a0987011c..5cf7b65094a3 100644 --- a/vendor/github.com/coreos/etcd/compactor/compactor.go +++ b/vendor/github.com/coreos/etcd/compactor/compactor.go @@ -30,7 +30,8 @@ var ( ) const ( - checkCompactionInterval = 5 * time.Minute + checkCompactionInterval = 5 * time.Minute + executeCompactionInterval = time.Hour ) type Compactable interface { @@ -41,6 +42,8 @@ type RevGetter interface { Rev() int64 } +// Periodic compacts the log by purging revisions older than +// the configured retention time. Compaction happens hourly. type Periodic struct { clock clockwork.Clock periodInHour int @@ -85,11 +88,12 @@ func (t *Periodic) Run() { continue } } - if clock.Now().Sub(last) < time.Duration(t.periodInHour)*time.Hour { + + if clock.Now().Sub(last) < executeCompactionInterval { continue } - rev := t.getRev(t.periodInHour) + rev, remaining := t.getRev(t.periodInHour) if rev < 0 { continue } @@ -97,11 +101,11 @@ func (t *Periodic) Run() { plog.Noticef("Starting auto-compaction at revision %d", rev) _, err := t.c.Compact(t.ctx, &pb.CompactionRequest{Revision: rev}) if err == nil || err == mvcc.ErrCompacted { - t.revs = make([]int64, 0) + t.revs = remaining last = clock.Now() plog.Noticef("Finished auto-compaction at revision %d", rev) } else { - plog.Noticef("Failed auto-compaction at revision %d (%v)", err, rev) + plog.Noticef("Failed auto-compaction at revision %d (%v)", rev, err) plog.Noticef("Retry after %v", checkCompactionInterval) } } @@ -124,10 +128,10 @@ func (t *Periodic) Resume() { t.paused = false } -func (t *Periodic) getRev(h int) int64 { +func (t *Periodic) getRev(h int) (int64, []int64) { i := len(t.revs) - int(time.Duration(h)*time.Hour/checkCompactionInterval) if i < 0 { - return -1 + return -1, t.revs } - return t.revs[i] + return t.revs[i], t.revs[i+1:] } diff --git a/vendor/github.com/coreos/etcd/discovery/srv.go b/vendor/github.com/coreos/etcd/discovery/srv.go deleted file mode 100644 index c3d20ca92437..000000000000 --- a/vendor/github.com/coreos/etcd/discovery/srv.go +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package discovery - -import ( - "fmt" - "net" - "net/url" - "strings" - - "github.com/coreos/etcd/pkg/types" -) - -var ( - // indirection for testing - lookupSRV = net.LookupSRV - resolveTCPAddr = net.ResolveTCPAddr -) - -// SRVGetCluster gets the cluster information via DNS discovery. 
-// TODO(barakmich): Currently ignores priority and weight (as they don't make as much sense for a bootstrap) -// Also doesn't do any lookups for the token (though it could) -// Also sees each entry as a separate instance. -func SRVGetCluster(name, dns string, defaultToken string, apurls types.URLs) (string, string, error) { - tempName := int(0) - tcp2ap := make(map[string]url.URL) - - // First, resolve the apurls - for _, url := range apurls { - tcpAddr, err := resolveTCPAddr("tcp", url.Host) - if err != nil { - plog.Errorf("couldn't resolve host %s during SRV discovery", url.Host) - return "", "", err - } - tcp2ap[tcpAddr.String()] = url - } - - stringParts := []string{} - updateNodeMap := func(service, scheme string) error { - _, addrs, err := lookupSRV(service, "tcp", dns) - if err != nil { - return err - } - for _, srv := range addrs { - port := fmt.Sprintf("%d", srv.Port) - host := net.JoinHostPort(srv.Target, port) - tcpAddr, err := resolveTCPAddr("tcp", host) - if err != nil { - plog.Warningf("couldn't resolve host %s during SRV discovery", host) - continue - } - n := "" - url, ok := tcp2ap[tcpAddr.String()] - if ok { - n = name - } - if n == "" { - n = fmt.Sprintf("%d", tempName) - tempName++ - } - // SRV records have a trailing dot but URL shouldn't. - shortHost := strings.TrimSuffix(srv.Target, ".") - urlHost := net.JoinHostPort(shortHost, port) - stringParts = append(stringParts, fmt.Sprintf("%s=%s://%s", n, scheme, urlHost)) - plog.Noticef("got bootstrap from DNS for %s at %s://%s", service, scheme, urlHost) - if ok && url.Scheme != scheme { - plog.Errorf("bootstrap at %s from DNS for %s has scheme mismatch with expected peer %s", scheme+"://"+urlHost, service, url.String()) - } - } - return nil - } - - failCount := 0 - err := updateNodeMap("etcd-server-ssl", "https") - srvErr := make([]string, 2) - if err != nil { - srvErr[0] = fmt.Sprintf("error querying DNS SRV records for _etcd-server-ssl %s", err) - failCount++ - } - err = updateNodeMap("etcd-server", "http") - if err != nil { - srvErr[1] = fmt.Sprintf("error querying DNS SRV records for _etcd-server %s", err) - failCount++ - } - if failCount == 2 { - plog.Warningf(srvErr[0]) - plog.Warningf(srvErr[1]) - plog.Errorf("SRV discovery failed: too many errors querying DNS SRV records") - return "", "", err - } - return strings.Join(stringParts, ","), defaultToken, nil -} diff --git a/vendor/github.com/coreos/etcd/embed/config.go b/vendor/github.com/coreos/etcd/embed/config.go index 242145301631..90efb3937d79 100644 --- a/vendor/github.com/coreos/etcd/embed/config.go +++ b/vendor/github.com/coreos/etcd/embed/config.go @@ -21,23 +21,30 @@ import ( "net/http" "net/url" "strings" + "time" - "github.com/coreos/etcd/discovery" "github.com/coreos/etcd/etcdserver" "github.com/coreos/etcd/pkg/cors" "github.com/coreos/etcd/pkg/netutil" + "github.com/coreos/etcd/pkg/srv" "github.com/coreos/etcd/pkg/transport" "github.com/coreos/etcd/pkg/types" + "github.com/ghodss/yaml" + "google.golang.org/grpc" ) const ( ClusterStateFlagNew = "new" ClusterStateFlagExisting = "existing" - DefaultName = "default" - DefaultMaxSnapshots = 5 - DefaultMaxWALs = 5 + DefaultName = "default" + DefaultMaxSnapshots = 5 + DefaultMaxWALs = 5 + DefaultMaxRequestBytes = 1.5 * 1024 * 1024 + DefaultGRPCKeepAliveMinTime = 5 * time.Second + DefaultGRPCKeepAliveInterval = 2 * time.Hour + DefaultGRPCKeepAliveTimeout = 20 * time.Second DefaultListenPeerURLs = "http://localhost:2380" DefaultListenClientURLs = "http://localhost:2379" @@ -83,6 +90,24 @@ type Config struct { 
TickMs uint `json:"heartbeat-interval"` ElectionMs uint `json:"election-timeout"` QuotaBackendBytes int64 `json:"quota-backend-bytes"` + MaxRequestBytes uint `json:"max-request-bytes"` + + // gRPC server options + + // GRPCKeepAliveMinTime is the minimum interval that a client should + // wait before pinging server. When client pings "too fast", server + // sends goaway and closes the connection (errors: too_many_pings, + // http2.ErrCodeEnhanceYourCalm). When too slow, nothing happens. + // Server expects client pings only when there is any active streams + // (PermitWithoutStream is set false). + GRPCKeepAliveMinTime time.Duration `json:"grpc-keepalive-min-time"` + // GRPCKeepAliveInterval is the frequency of server-to-client ping + // to check if a connection is alive. Close a non-responsive connection + // after an additional duration of Timeout. 0 to disable. + GRPCKeepAliveInterval time.Duration `json:"grpc-keepalive-interval"` + // GRPCKeepAliveTimeout is the additional duration of wait + // before closing a non-responsive connection. 0 to disable. + GRPCKeepAliveTimeout time.Duration `json:"grpc-keepalive-timeout"` // clustering @@ -94,6 +119,7 @@ type Config struct { InitialCluster string `json:"initial-cluster"` InitialClusterToken string `json:"initial-cluster-token"` StrictReconfigCheck bool `json:"strict-reconfig-check"` + EnableV2 bool `json:"enable-v2"` // security @@ -106,7 +132,7 @@ type Config struct { Debug bool `json:"debug"` LogPkgLevels string `json:"log-package-levels"` - EnablePprof bool + EnablePprof bool `json:"enable-pprof"` Metrics string `json:"metrics"` // ForceNewCluster starts a new cluster even if previously started; unsafe. @@ -117,6 +143,18 @@ type Config struct { // The map key is the route path for the handler, and // you must ensure it can't be conflicted with etcd's. UserHandlers map[string]http.Handler `json:"-"` + // ServiceRegister is for registering users' gRPC services. 
A simple usage example: + // cfg := embed.NewConfig() + // cfg.ServiceRegister = func(s *grpc.Server) { + // pb.RegisterFooServer(s, &fooServer{}) + // pb.RegisterBarServer(s, &barServer{}) + // } + // embed.StartEtcd(cfg) + ServiceRegister func(*grpc.Server) `json:"-"` + + // auth + + AuthToken string `json:"auth-token"` } // configYAML holds the config suitable for yaml parsing @@ -152,21 +190,27 @@ func NewConfig() *Config { lcurl, _ := url.Parse(DefaultListenClientURLs) acurl, _ := url.Parse(DefaultAdvertiseClientURLs) cfg := &Config{ - CorsInfo: &cors.CORSInfo{}, - MaxSnapFiles: DefaultMaxSnapshots, - MaxWalFiles: DefaultMaxWALs, - Name: DefaultName, - SnapCount: etcdserver.DefaultSnapCount, - TickMs: 100, - ElectionMs: 1000, - LPUrls: []url.URL{*lpurl}, - LCUrls: []url.URL{*lcurl}, - APUrls: []url.URL{*apurl}, - ACUrls: []url.URL{*acurl}, - ClusterState: ClusterStateFlagNew, - InitialClusterToken: "etcd-cluster", - StrictReconfigCheck: true, - Metrics: "basic", + CorsInfo: &cors.CORSInfo{}, + MaxSnapFiles: DefaultMaxSnapshots, + MaxWalFiles: DefaultMaxWALs, + Name: DefaultName, + SnapCount: etcdserver.DefaultSnapCount, + MaxRequestBytes: DefaultMaxRequestBytes, + GRPCKeepAliveMinTime: DefaultGRPCKeepAliveMinTime, + GRPCKeepAliveInterval: DefaultGRPCKeepAliveInterval, + GRPCKeepAliveTimeout: DefaultGRPCKeepAliveTimeout, + TickMs: 100, + ElectionMs: 1000, + LPUrls: []url.URL{*lpurl}, + LCUrls: []url.URL{*lcurl}, + APUrls: []url.URL{*apurl}, + ACUrls: []url.URL{*acurl}, + ClusterState: ClusterStateFlagNew, + InitialClusterToken: "etcd-cluster", + StrictReconfigCheck: true, + Metrics: "basic", + EnableV2: true, + AuthToken: "simple", } cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name) return cfg @@ -186,6 +230,8 @@ func (cfg *configYAML) configFromFile(path string) error { return err } + defaultInitialCluster := cfg.InitialCluster + err = yaml.Unmarshal(b, cfg) if err != nil { return err @@ -229,7 +275,8 @@ func (cfg *configYAML) configFromFile(path string) error { cfg.ACUrls = []url.URL(u) } - if (cfg.Durl != "" || cfg.DNSCluster != "") && cfg.InitialCluster == cfg.InitialClusterFromName(cfg.Name) { + // If a discovery flag is set, clear default initial cluster set by InitialClusterFromName + if (cfg.Durl != "" || cfg.DNSCluster != "") && cfg.InitialCluster == defaultInitialCluster { cfg.InitialCluster = "" } if cfg.ClusterState == "" { @@ -292,6 +339,7 @@ func (cfg *Config) Validate() error { // PeerURLsMapAndToken sets up an initial peer URLsMap and cluster token for bootstrap or discovery.
func (cfg *Config) PeerURLsMapAndToken(which string) (urlsmap types.URLsMap, token string, err error) { + token = cfg.InitialClusterToken switch { case cfg.Durl != "": urlsmap = types.URLsMap{} @@ -300,11 +348,15 @@ func (cfg *Config) PeerURLsMapAndToken(which string) (urlsmap types.URLsMap, tok urlsmap[cfg.Name] = cfg.APUrls token = cfg.Durl case cfg.DNSCluster != "": - var clusterStr string - clusterStr, token, err = discovery.SRVGetCluster(cfg.Name, cfg.DNSCluster, cfg.InitialClusterToken, cfg.APUrls) - if err != nil { - return nil, "", err + clusterStrs, cerr := srv.GetCluster("etcd-server", cfg.Name, cfg.DNSCluster, cfg.APUrls) + if cerr != nil { + plog.Errorf("couldn't resolve during SRV discovery (%v)", cerr) + return nil, "", cerr + } + for _, s := range clusterStrs { + plog.Noticef("got bootstrap from DNS for etcd-server at %s", s) } + clusterStr := strings.Join(clusterStrs, ",") if strings.Contains(clusterStr, "https://") && cfg.PeerTLSInfo.CAFile == "" { cfg.PeerTLSInfo.ServerName = cfg.DNSCluster } @@ -319,7 +371,6 @@ func (cfg *Config) PeerURLsMapAndToken(which string) (urlsmap types.URLsMap, tok default: // We're statically configured, and cluster has appropriately been set. urlsmap, err = types.NewURLsMap(cfg.InitialCluster) - token = cfg.InitialClusterToken } return urlsmap, token, err } @@ -367,7 +418,7 @@ func (cfg *Config) UpdateDefaultClusterFromName(defaultInitialCluster string) (s } used := false - pip, pport, _ := net.SplitHostPort(cfg.LPUrls[0].Host) + pip, pport := cfg.LPUrls[0].Hostname(), cfg.LPUrls[0].Port() if cfg.defaultPeerHost() && pip == "0.0.0.0" { cfg.APUrls[0] = url.URL{Scheme: cfg.APUrls[0].Scheme, Host: fmt.Sprintf("%s:%s", defaultHostname, pport)} used = true @@ -377,7 +428,7 @@ func (cfg *Config) UpdateDefaultClusterFromName(defaultInitialCluster string) (s cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name) } - cip, cport, _ := net.SplitHostPort(cfg.LCUrls[0].Host) + cip, cport := cfg.LCUrls[0].Hostname(), cfg.LCUrls[0].Port() if cfg.defaultClientHost() && cip == "0.0.0.0" { cfg.ACUrls[0] = url.URL{Scheme: cfg.ACUrls[0].Scheme, Host: fmt.Sprintf("%s:%s", defaultHostname, cport)} used = true @@ -406,8 +457,7 @@ func checkBindURLs(urls []url.URL) error { continue } if net.ParseIP(host) == nil { - err := fmt.Errorf("expected IP in URL for binding (%s)", url.String()) - plog.Warning(err) + return fmt.Errorf("expected IP in URL for binding (%s)", url.String()) } } return nil diff --git a/vendor/github.com/coreos/etcd/embed/etcd.go b/vendor/github.com/coreos/etcd/embed/etcd.go index a33a85546259..2f500f9f13b7 100644 --- a/vendor/github.com/coreos/etcd/embed/etcd.go +++ b/vendor/github.com/coreos/etcd/embed/etcd.go @@ -15,20 +15,32 @@ package embed import ( + "context" "crypto/tls" "fmt" + "io/ioutil" + defaultLog "log" "net" "net/http" "path/filepath" + "sync" + "time" "github.com/coreos/etcd/etcdserver" + "github.com/coreos/etcd/etcdserver/api/etcdhttp" "github.com/coreos/etcd/etcdserver/api/v2http" + "github.com/coreos/etcd/etcdserver/api/v3rpc" "github.com/coreos/etcd/pkg/cors" + "github.com/coreos/etcd/pkg/debugutil" runtimeutil "github.com/coreos/etcd/pkg/runtime" "github.com/coreos/etcd/pkg/transport" "github.com/coreos/etcd/pkg/types" "github.com/coreos/etcd/rafthttp" + + "github.com/cockroachdb/cmux" "github.com/coreos/pkg/capnslog" + "google.golang.org/grpc" + "google.golang.org/grpc/keepalive" ) var plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "embed") @@ -49,13 +61,24 @@ const ( // Etcd contains a running etcd server and its 
listeners. type Etcd struct { - Peers []net.Listener + Peers []*peerListener Clients []net.Listener - Server *etcdserver.EtcdServer + // a map of contexts for the servers that serves client requests. + sctxs map[string]*serveCtx + + Server *etcdserver.EtcdServer cfg Config + stopc chan struct{} errc chan error - sctxs map[string]*serveCtx + + closeOnce sync.Once +} + +type peerListener struct { + net.Listener + serve func() error + close func(context.Context) error } // StartEtcd launches the etcd server and HTTP handlers for client/server communication. @@ -65,20 +88,28 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) { if err = inCfg.Validate(); err != nil { return nil, err } - e = &Etcd{cfg: *inCfg} + serving := false + e = &Etcd{cfg: *inCfg, stopc: make(chan struct{})} cfg := &e.cfg defer func() { - if e != nil && err != nil { - e.Close() - e = nil + if e == nil || err == nil { + return + } + if !serving { + // errored before starting gRPC server for serveCtx.serversC + for _, sctx := range e.sctxs { + close(sctx.serversC) + } } + e.Close() + e = nil }() if e.Peers, err = startPeerListeners(cfg); err != nil { - return + return e, err } if e.sctxs, err = startClientListeners(cfg); err != nil { - return + return e, err } for _, sctx := range e.sctxs { e.Clients = append(e.Clients, sctx.l) @@ -116,22 +147,31 @@ func StartEtcd(inCfg *Config) (e *Etcd, err error) { ElectionTicks: cfg.ElectionTicks(), AutoCompactionRetention: cfg.AutoCompactionRetention, QuotaBackendBytes: cfg.QuotaBackendBytes, + MaxRequestBytes: cfg.MaxRequestBytes, StrictReconfigCheck: cfg.StrictReconfigCheck, ClientCertAuthEnabled: cfg.ClientTLSInfo.ClientCertAuth, + AuthToken: cfg.AuthToken, + Debug: cfg.Debug, } if e.Server, err = etcdserver.NewServer(srvcfg); err != nil { - return + return e, err } // buffer channel so goroutines on closed connections won't wait forever e.errc = make(chan error, len(e.Peers)+len(e.Clients)+2*len(e.sctxs)) e.Server.Start() - if err = e.serve(); err != nil { - return + + if err = e.servePeers(); err != nil { + return e, err + } + if err = e.serveClients(); err != nil { + return e, err } - return + + serving = true + return e, nil } // Config returns the current configuration. @@ -139,28 +179,90 @@ func (e *Etcd) Config() Config { return e.cfg } +// Close gracefully shuts down all servers/listeners. +// Client requests will be terminated with request timeout. +// After timeout, enforce remaining requests be closed immediately.
func (e *Etcd) Close() { - for _, sctx := range e.sctxs { - sctx.cancel() + e.closeOnce.Do(func() { close(e.stopc) }) + + // close client requests with request timeout + timeout := 2 * time.Second + if e.Server != nil { + timeout = e.Server.Cfg.ReqTimeout() } - for i := range e.Peers { - if e.Peers[i] != nil { - e.Peers[i].Close() + for _, sctx := range e.sctxs { + for ss := range sctx.serversC { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + stopServers(ctx, ss) + cancel() } } + + for _, sctx := range e.sctxs { + sctx.cancel() + } + for i := range e.Clients { if e.Clients[i] != nil { e.Clients[i].Close() } } + + // close rafthttp transports if e.Server != nil { e.Server.Stop() } + + // close all idle connections in peer handler (wait up to 1-second) + for i := range e.Peers { + if e.Peers[i] != nil && e.Peers[i].close != nil { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + e.Peers[i].close(ctx) + cancel() + } + } +} + +func stopServers(ctx context.Context, ss *servers) { + shutdownNow := func() { + // first, close the http.Server + ss.http.Shutdown(ctx) + // then close grpc.Server; cancels all active RPCs + ss.grpc.Stop() + } + + // do not grpc.Server.GracefulStop with TLS enabled etcd server + // See https://github.com/grpc/grpc-go/issues/1384#issuecomment-317124531 + // and https://github.com/coreos/etcd/issues/8916 + if ss.secure { + shutdownNow() + return + } + + ch := make(chan struct{}) + go func() { + defer close(ch) + // close listeners to stop accepting new connections, + // will block on any existing transports + ss.grpc.GracefulStop() + }() + + // wait until all pending RPCs are finished + select { + case <-ch: + case <-ctx.Done(): + // took too long, manually close open transports + // e.g. watch streams + shutdownNow() + + // concurrent GracefulStop should be interrupted + <-ch + } } func (e *Etcd) Err() <-chan error { return e.errc } -func startPeerListeners(cfg *Config) (plns []net.Listener, err error) { +func startPeerListeners(cfg *Config) (peers []*peerListener, err error) { if cfg.PeerAutoTLS && cfg.PeerTLSInfo.Empty() { phosts := make([]string, len(cfg.LPUrls)) for i, u := range cfg.LPUrls { @@ -178,22 +280,22 @@ func startPeerListeners(cfg *Config) (plns []net.Listener, err error) { plog.Infof("peerTLS: %s", cfg.PeerTLSInfo) } - plns = make([]net.Listener, len(cfg.LPUrls)) + peers = make([]*peerListener, len(cfg.LPUrls)) defer func() { if err == nil { return } - for i := range plns { - if plns[i] == nil { - continue + for i := range peers { + if peers[i] != nil && peers[i].close != nil { + plog.Info("stopping listening for peers on ", cfg.LPUrls[i].String()) + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + peers[i].close(ctx) + cancel() } - plns[i].Close() - plog.Info("stopping listening for peers on ", cfg.LPUrls[i].String()) } }() for i, u := range cfg.LPUrls { - var tlscfg *tls.Config if u.Scheme == "http" { if !cfg.PeerTLSInfo.Empty() { plog.Warningf("The scheme of peer url %s is HTTP while peer key/cert files are presented. Ignored peer key/cert files.", u.String()) @@ -202,17 +304,57 @@ func startPeerListeners(cfg *Config) (plns []net.Listener, err error) { plog.Warningf("The scheme of peer url %s is HTTP while client cert auth (--peer-client-cert-auth) is enabled. 
Ignored client cert auth for this url.", u.String()) } } - if !cfg.PeerTLSInfo.Empty() { - if tlscfg, err = cfg.PeerTLSInfo.ServerConfig(); err != nil { - return nil, err - } - } - if plns[i], err = rafthttp.NewListener(u, tlscfg); err != nil { + peers[i] = &peerListener{close: func(context.Context) error { return nil }} + peers[i].Listener, err = rafthttp.NewListener(u, &cfg.PeerTLSInfo) + if err != nil { return nil, err } + // once serve, overwrite with 'http.Server.Shutdown' + peers[i].close = func(context.Context) error { + return peers[i].Listener.Close() + } plog.Info("listening for peers on ", u.String()) } - return plns, nil + return peers, nil +} + +// configure peer handlers after rafthttp.Transport started +func (e *Etcd) servePeers() (err error) { + ph := etcdhttp.NewPeerHandler(e.Server) + var peerTLScfg *tls.Config + if !e.cfg.PeerTLSInfo.Empty() { + if peerTLScfg, err = e.cfg.PeerTLSInfo.ServerConfig(); err != nil { + return err + } + } + + for _, p := range e.Peers { + gs := v3rpc.Server(e.Server, peerTLScfg) + m := cmux.New(p.Listener) + go gs.Serve(m.Match(cmux.HTTP2())) + srv := &http.Server{ + Handler: grpcHandlerFunc(gs, ph), + ReadTimeout: 5 * time.Minute, + ErrorLog: defaultLog.New(ioutil.Discard, "", 0), // do not log user error + } + go srv.Serve(m.Match(cmux.Any())) + p.serve = func() error { return m.Serve() } + p.close = func(ctx context.Context) error { + // gracefully shutdown http.Server + // close open listeners, idle connections + // until context cancel or time-out + stopServers(ctx, &servers{secure: peerTLScfg != nil, grpc: gs, http: srv}) + return nil + } + } + + // start peer servers in a goroutine + for _, pl := range e.Peers { + go func(l *peerListener) { + e.errHandler(l.serve()) + }(pl) + } + return nil } func startClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err error) { @@ -230,7 +372,7 @@ func startClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err error) { } if cfg.EnablePprof { - plog.Infof("pprof is enabled under %s", pprofPrefix) + plog.Infof("pprof is enabled under %s", debugutil.HTTPPrefixPProf) } sctxs = make(map[string]*serveCtx) @@ -250,21 +392,26 @@ func startClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err error) { } proto := "tcp" + addr := u.Host if u.Scheme == "unix" || u.Scheme == "unixs" { proto = "unix" + addr = u.Host + u.Path } sctx.secure = u.Scheme == "https" || u.Scheme == "unixs" sctx.insecure = !sctx.secure - if oldctx := sctxs[u.Host]; oldctx != nil { + if oldctx := sctxs[addr]; oldctx != nil { oldctx.secure = oldctx.secure || sctx.secure oldctx.insecure = oldctx.insecure || sctx.insecure continue } - if sctx.l, err = net.Listen(proto, u.Host); err != nil { + if sctx.l, err = net.Listen(proto, addr); err != nil { return nil, err } + // net.Listener will rewrite ipv4 0.0.0.0 to ipv6 [::], breaking + // hosts that disable ipv6. So, use the address given by the user. 
+ sctx.addr = addr if fdLimit, fderr := runtimeutil.FDLimit(); fderr == nil { if fdLimit <= reservedInternalFDNum { @@ -289,15 +436,19 @@ func startClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err error) { for k := range cfg.UserHandlers { sctx.userHandlers[k] = cfg.UserHandlers[k] } - if cfg.EnablePprof { + sctx.serviceRegister = cfg.ServiceRegister + if cfg.EnablePprof || cfg.Debug { sctx.registerPprof() } - sctxs[u.Host] = sctx + if cfg.Debug { + sctx.registerTrace() + } + sctxs[addr] = sctx } return sctxs, nil } -func (e *Etcd) serve() (err error) { +func (e *Etcd) serveClients() (err error) { var ctlscfg *tls.Config if !e.cfg.ClientTLSInfo.Empty() { plog.Infof("ClientTLS: %s", e.cfg.ClientTLSInfo) @@ -310,25 +461,49 @@ func (e *Etcd) serve() (err error) { plog.Infof("cors = %s", e.cfg.CorsInfo) } - // Start the peer server in a goroutine - ph := v2http.NewPeerHandler(e.Server) - for _, l := range e.Peers { - go func(l net.Listener) { - e.errc <- servePeerHTTP(l, ph) - }(l) + // Start a client server goroutine for each listen address + var h http.Handler + if e.Config().EnableV2 { + h = v2http.NewClientHandler(e.Server, e.Server.Cfg.ReqTimeout()) + } else { + mux := http.NewServeMux() + etcdhttp.HandleBasic(mux, e.Server) + h = mux + } + h = http.Handler(&cors.CORSHandler{Handler: h, Info: e.cfg.CorsInfo}) + + gopts := []grpc.ServerOption{} + if e.cfg.GRPCKeepAliveMinTime > time.Duration(0) { + gopts = append(gopts, grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{ + MinTime: e.cfg.GRPCKeepAliveMinTime, + PermitWithoutStream: false, + })) + } + if e.cfg.GRPCKeepAliveInterval > time.Duration(0) && + e.cfg.GRPCKeepAliveTimeout > time.Duration(0) { + gopts = append(gopts, grpc.KeepaliveParams(keepalive.ServerParameters{ + Time: e.cfg.GRPCKeepAliveInterval, + Timeout: e.cfg.GRPCKeepAliveTimeout, + })) } - // Start a client server goroutine for each listen address - ch := http.Handler(&cors.CORSHandler{ - Handler: v2http.NewClientHandler(e.Server, e.Server.Cfg.ReqTimeout()), - Info: e.cfg.CorsInfo, - }) + // start client servers in a goroutine for _, sctx := range e.sctxs { - // read timeout does not work with http close notify - // TODO: https://github.com/golang/go/issues/9524 go func(s *serveCtx) { - e.errc <- s.serve(e.Server, ctlscfg, ch, e.errc) + e.errHandler(s.serve(e.Server, ctlscfg, h, e.errHandler, gopts...)) }(sctx) } return nil } + +func (e *Etcd) errHandler(err error) { + select { + case <-e.stopc: + return + default: + } + select { + case <-e.stopc: + case e.errc <- err: + } +} diff --git a/vendor/github.com/coreos/etcd/embed/serve.go b/vendor/github.com/coreos/etcd/embed/serve.go index 84b97615c617..b659bf8b7d68 100644 --- a/vendor/github.com/coreos/etcd/embed/serve.go +++ b/vendor/github.com/coreos/etcd/embed/serve.go @@ -20,58 +20,87 @@ import ( defaultLog "log" "net" "net/http" - "net/http/pprof" "strings" - "time" "github.com/coreos/etcd/etcdserver" + "github.com/coreos/etcd/etcdserver/api/v3client" + "github.com/coreos/etcd/etcdserver/api/v3election" + "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb" + v3electiongw "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw" + "github.com/coreos/etcd/etcdserver/api/v3lock" + "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb" + v3lockgw "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw" "github.com/coreos/etcd/etcdserver/api/v3rpc" - pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/pkg/transport" + etcdservergw 
"github.com/coreos/etcd/etcdserver/etcdserverpb/gw" + "github.com/coreos/etcd/pkg/debugutil" "github.com/cockroachdb/cmux" gw "github.com/grpc-ecosystem/grpc-gateway/runtime" "golang.org/x/net/context" + "golang.org/x/net/trace" "google.golang.org/grpc" "google.golang.org/grpc/credentials" ) -const pprofPrefix = "/debug/pprof" - type serveCtx struct { l net.Listener + addr string secure bool insecure bool ctx context.Context cancel context.CancelFunc - userHandlers map[string]http.Handler + userHandlers map[string]http.Handler + serviceRegister func(*grpc.Server) + serversC chan *servers +} + +type servers struct { + secure bool + grpc *grpc.Server + http *http.Server } func newServeCtx() *serveCtx { ctx, cancel := context.WithCancel(context.Background()) - return &serveCtx{ctx: ctx, cancel: cancel, userHandlers: make(map[string]http.Handler)} + return &serveCtx{ + ctx: ctx, + cancel: cancel, + userHandlers: make(map[string]http.Handler), + serversC: make(chan *servers, 2), // in case sctx.insecure,sctx.secure true + } } // serve accepts incoming connections on the listener l, // creating a new service goroutine for each. The service goroutines // read requests and then call handler to reply to them. -func (sctx *serveCtx) serve(s *etcdserver.EtcdServer, tlscfg *tls.Config, handler http.Handler, errc chan<- error) error { +func (sctx *serveCtx) serve( + s *etcdserver.EtcdServer, + tlscfg *tls.Config, + handler http.Handler, + errHandler func(error), + gopts ...grpc.ServerOption) error { logger := defaultLog.New(ioutil.Discard, "etcdhttp", 0) <-s.ReadyNotify() plog.Info("ready to serve client requests") m := cmux.New(sctx.l) + v3c := v3client.New(s) + servElection := v3election.NewElectionServer(v3c) + servLock := v3lock.NewLockServer(v3c) if sctx.insecure { - gs := v3rpc.Server(s, nil) + gs := v3rpc.Server(s, nil, gopts...) + v3electionpb.RegisterElectionServer(gs, servElection) + v3lockpb.RegisterLockServer(gs, servLock) + if sctx.serviceRegister != nil { + sctx.serviceRegister(gs) + } grpcl := m.Match(cmux.HTTP2()) - go func() { errc <- gs.Serve(grpcl) }() + go func() { errHandler(gs.Serve(grpcl)) }() - opts := []grpc.DialOption{ - grpc.WithInsecure(), - } + opts := []grpc.DialOption{grpc.WithInsecure()} gwmux, err := sctx.registerGateway(opts) if err != nil { return err @@ -84,15 +113,22 @@ func (sctx *serveCtx) serve(s *etcdserver.EtcdServer, tlscfg *tls.Config, handle ErrorLog: logger, // do not log user error } httpl := m.Match(cmux.HTTP1()) - go func() { errc <- srvhttp.Serve(httpl) }() + go func() { errHandler(srvhttp.Serve(httpl)) }() + + sctx.serversC <- &servers{grpc: gs, http: srvhttp} plog.Noticef("serving insecure client requests on %s, this is strongly discouraged!", sctx.l.Addr().String()) } if sctx.secure { - gs := v3rpc.Server(s, tlscfg) + gs := v3rpc.Server(s, tlscfg, gopts...) 
+ v3electionpb.RegisterElectionServer(gs, servElection) + v3lockpb.RegisterLockServer(gs, servLock) + if sctx.serviceRegister != nil { + sctx.serviceRegister(gs) + } handler = grpcHandlerFunc(gs, handler) - dtls := transport.ShallowCopyTLSConfig(tlscfg) + dtls := tlscfg.Clone() // trust local server dtls.InsecureSkipVerify = true creds := credentials.NewTLS(dtls) @@ -111,17 +147,24 @@ func (sctx *serveCtx) serve(s *etcdserver.EtcdServer, tlscfg *tls.Config, handle TLSConfig: tlscfg, ErrorLog: logger, // do not log user error } - go func() { errc <- srv.Serve(tlsl) }() + go func() { errHandler(srv.Serve(tlsl)) }() + sctx.serversC <- &servers{secure: true, grpc: gs, http: srv} plog.Infof("serving client requests on %s", sctx.l.Addr().String()) } + close(sctx.serversC) return m.Serve() } // grpcHandlerFunc returns an http.Handler that delegates to grpcServer on incoming gRPC // connections or otherHandler otherwise. Copied from cockroachdb. func grpcHandlerFunc(grpcServer *grpc.Server, otherHandler http.Handler) http.Handler { + if otherHandler == nil { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + grpcServer.ServeHTTP(w, r) + }) + } return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if r.ProtoMajor == 2 && strings.Contains(r.Header.Get("Content-Type"), "application/grpc") { grpcServer.ServeHTTP(w, r) @@ -131,46 +174,38 @@ func grpcHandlerFunc(grpcServer *grpc.Server, otherHandler http.Handler) http.Ha }) } -func servePeerHTTP(l net.Listener, handler http.Handler) error { - logger := defaultLog.New(ioutil.Discard, "etcdhttp", 0) - // TODO: add debug flag; enable logging when debug flag is set - srv := &http.Server{ - Handler: handler, - ReadTimeout: 5 * time.Minute, - ErrorLog: logger, // do not log user error - } - return srv.Serve(l) -} +type registerHandlerFunc func(context.Context, *gw.ServeMux, *grpc.ClientConn) error func (sctx *serveCtx) registerGateway(opts []grpc.DialOption) (*gw.ServeMux, error) { ctx := sctx.ctx - addr := sctx.l.Addr().String() - gwmux := gw.NewServeMux() - - err := pb.RegisterKVHandlerFromEndpoint(ctx, gwmux, addr, opts) - if err != nil { - return nil, err - } - err = pb.RegisterWatchHandlerFromEndpoint(ctx, gwmux, addr, opts) - if err != nil { - return nil, err - } - err = pb.RegisterLeaseHandlerFromEndpoint(ctx, gwmux, addr, opts) + conn, err := grpc.DialContext(ctx, sctx.addr, opts...) if err != nil { return nil, err } - err = pb.RegisterClusterHandlerFromEndpoint(ctx, gwmux, addr, opts) - if err != nil { - return nil, err - } - err = pb.RegisterMaintenanceHandlerFromEndpoint(ctx, gwmux, addr, opts) - if err != nil { - return nil, err + gwmux := gw.NewServeMux() + + handlers := []registerHandlerFunc{ + etcdservergw.RegisterKVHandler, + etcdservergw.RegisterWatchHandler, + etcdservergw.RegisterLeaseHandler, + etcdservergw.RegisterClusterHandler, + etcdservergw.RegisterMaintenanceHandler, + etcdservergw.RegisterAuthHandler, + v3lockgw.RegisterLockHandler, + v3electiongw.RegisterElectionHandler, } - err = pb.RegisterAuthHandlerFromEndpoint(ctx, gwmux, addr, opts) - if err != nil { - return nil, err + for _, h := range handlers { + if err := h(ctx, gwmux, conn); err != nil { + return nil, err + } } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + plog.Warningf("failed to close conn to %s: %v", sctx.l.Addr().String(), cerr) + } + }() + return gwmux, nil } @@ -181,26 +216,29 @@ func (sctx *serveCtx) createMux(gwmux *gw.ServeMux, handler http.Handler) *http. 
} httpmux.Handle("/v3alpha/", gwmux) - httpmux.Handle("/", handler) + if handler != nil { + httpmux.Handle("/", handler) + } return httpmux } +func (sctx *serveCtx) registerUserHandler(s string, h http.Handler) { + if sctx.userHandlers[s] != nil { + plog.Warningf("path %s already registered by user handler", s) + return + } + sctx.userHandlers[s] = h +} + func (sctx *serveCtx) registerPprof() { - f := func(s string, h http.Handler) { - if sctx.userHandlers[s] != nil { - plog.Warningf("path %s already registered by user handler", s) - return - } - sctx.userHandlers[s] = h + for p, h := range debugutil.PProfHandlers() { + sctx.registerUserHandler(p, h) } - f(pprofPrefix+"/", http.HandlerFunc(pprof.Index)) - f(pprofPrefix+"/profile", http.HandlerFunc(pprof.Profile)) - f(pprofPrefix+"/symbol", http.HandlerFunc(pprof.Symbol)) - f(pprofPrefix+"/cmdline", http.HandlerFunc(pprof.Cmdline)) - f(pprofPrefix+"/trace", http.HandlerFunc(pprof.Trace)) - - f(pprofPrefix+"/heap", pprof.Handler("heap")) - f(pprofPrefix+"/goroutine", pprof.Handler("goroutine")) - f(pprofPrefix+"/threadcreate", pprof.Handler("threadcreate")) - f(pprofPrefix+"/block", pprof.Handler("block")) +} + +func (sctx *serveCtx) registerTrace() { + reqf := func(w http.ResponseWriter, r *http.Request) { trace.Render(w, r, true) } + sctx.registerUserHandler("/debug/requests", http.HandlerFunc(reqf)) + evf := func(w http.ResponseWriter, r *http.Request) { trace.RenderEvents(w, r, true) } + sctx.registerUserHandler("/debug/events", http.HandlerFunc(evf)) } diff --git a/vendor/github.com/coreos/etcd/error/error.go b/vendor/github.com/coreos/etcd/error/error.go index 8cf83cc716af..b541a628b87f 100644 --- a/vendor/github.com/coreos/etcd/error/error.go +++ b/vendor/github.com/coreos/etcd/error/error.go @@ -154,9 +154,10 @@ func (e Error) StatusCode() int { return status } -func (e Error) WriteTo(w http.ResponseWriter) { +func (e Error) WriteTo(w http.ResponseWriter) error { w.Header().Add("X-Etcd-Index", fmt.Sprint(e.Index)) w.Header().Set("Content-Type", "application/json") w.WriteHeader(e.StatusCode()) - fmt.Fprintln(w, e.toJsonString()) + _, err := w.Write([]byte(e.toJsonString() + "\n")) + return err } diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/capability.go b/vendor/github.com/coreos/etcd/etcdserver/api/capability.go index ab8cee7cf89c..5e2de58e9a10 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/capability.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/capability.go @@ -33,11 +33,10 @@ var ( plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api") // capabilityMaps is a static map of version to capability map. - // the base capabilities is the set of capability 2.0 supports. capabilityMaps = map[string]map[Capability]bool{ - "2.3.0": {AuthCapability: true}, "3.0.0": {AuthCapability: true, V3rpcCapability: true}, "3.1.0": {AuthCapability: true, V3rpcCapability: true}, + "3.2.0": {AuthCapability: true, V3rpcCapability: true}, } enableMapMu sync.RWMutex @@ -48,7 +47,10 @@ var ( ) func init() { - enabledMap = make(map[Capability]bool) + enabledMap = map[Capability]bool{ + AuthCapability: true, + V3rpcCapability: true, + } } // UpdateCapability updates the enabledMap when the cluster version increases. 
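Taken together, the embed changes above add three gRPC keepalive knobs, a ServiceRegister hook for user gRPC services, and a Close that drains the client servers before shutting down peer listeners. A hedged usage sketch of that surface follows; the data directory and the commented-out service registration are illustrative assumptions, not part of this diff.

package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/embed"
	"google.golang.org/grpc"
)

func main() {
	cfg := embed.NewConfig()
	cfg.Dir = "default.etcd" // assumed scratch data directory
	// The new knobs from the diff above; the values shown are the defaults.
	cfg.GRPCKeepAliveMinTime = 5 * time.Second
	cfg.GRPCKeepAliveInterval = 2 * time.Hour
	cfg.GRPCKeepAliveTimeout = 20 * time.Second
	// Hook for user gRPC services; the registered service is hypothetical.
	cfg.ServiceRegister = func(s *grpc.Server) {
		// pb.RegisterFooServer(s, &fooServer{})
	}

	e, err := embed.StartEtcd(cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer e.Close() // drains client gRPC/HTTP servers, then peer listeners

	select {
	case <-e.Server.ReadyNotify():
		log.Println("embedded etcd is ready")
	case err := <-e.Err():
		log.Fatal(err)
	}
}

Note that Close first stops the gRPC/HTTP servers collected in serversC under the request timeout, then cancels the serve contexts and peer listeners, which is why a single deferred Close is enough for a clean shutdown.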
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/base.go b/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/base.go new file mode 100644 index 000000000000..283b32dbf956 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/base.go @@ -0,0 +1,186 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package etcdhttp + +import ( + "encoding/json" + "expvar" + "fmt" + "net/http" + "strings" + "time" + + etcdErr "github.com/coreos/etcd/error" + "github.com/coreos/etcd/etcdserver" + "github.com/coreos/etcd/etcdserver/api" + "github.com/coreos/etcd/etcdserver/api/v2http/httptypes" + "github.com/coreos/etcd/etcdserver/etcdserverpb" + "github.com/coreos/etcd/pkg/logutil" + "github.com/coreos/etcd/raft" + "github.com/coreos/etcd/version" + "github.com/coreos/pkg/capnslog" + "github.com/prometheus/client_golang/prometheus" + "golang.org/x/net/context" +) + +var ( + plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api/etcdhttp") + mlog = logutil.NewMergeLogger(plog) +) + +const ( + configPath = "/config" + metricsPath = "/metrics" + healthPath = "/health" + varsPath = "/debug/vars" + versionPath = "/version" +) + +// HandleBasic adds handlers to a mux for serving JSON etcd client requests +// that do not access the v2 store. 
+func HandleBasic(mux *http.ServeMux, server *etcdserver.EtcdServer) { + mux.HandleFunc(varsPath, serveVars) + mux.HandleFunc(configPath+"/local/log", logHandleFunc) + mux.Handle(metricsPath, prometheus.Handler()) + mux.Handle(healthPath, healthHandler(server)) + mux.HandleFunc(versionPath, versionHandler(server.Cluster(), serveVersion)) +} + +func healthHandler(server *etcdserver.EtcdServer) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + if !allowMethod(w, r, "GET") { + return + } + if uint64(server.Leader()) == raft.None { + http.Error(w, `{"health": "false"}`, http.StatusServiceUnavailable) + return + } + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + if _, err := server.Do(ctx, etcdserverpb.Request{Method: "QGET"}); err != nil { + http.Error(w, `{"health": "false"}`, http.StatusServiceUnavailable) + return + } + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{"health": "true"}`)) + } +} + +func versionHandler(c api.Cluster, fn func(http.ResponseWriter, *http.Request, string)) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + v := c.Version() + if v != nil { + fn(w, r, v.String()) + } else { + fn(w, r, "not_decided") + } + } +} + +func serveVersion(w http.ResponseWriter, r *http.Request, clusterV string) { + if !allowMethod(w, r, "GET") { + return + } + vs := version.Versions{ + Server: version.Version, + Cluster: clusterV, + } + + w.Header().Set("Content-Type", "application/json") + b, err := json.Marshal(&vs) + if err != nil { + plog.Panicf("cannot marshal versions to json (%v)", err) + } + w.Write(b) +} + +func logHandleFunc(w http.ResponseWriter, r *http.Request) { + if !allowMethod(w, r, "PUT") { + return + } + + in := struct{ Level string }{} + + d := json.NewDecoder(r.Body) + if err := d.Decode(&in); err != nil { + WriteError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid json body")) + return + } + + logl, err := capnslog.ParseLevel(strings.ToUpper(in.Level)) + if err != nil { + WriteError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid log level "+in.Level)) + return + } + + plog.Noticef("globalLogLevel set to %q", logl.String()) + capnslog.SetGlobalLogLevel(logl) + w.WriteHeader(http.StatusNoContent) +} + +func serveVars(w http.ResponseWriter, r *http.Request) { + if !allowMethod(w, r, "GET") { + return + } + + w.Header().Set("Content-Type", "application/json; charset=utf-8") + fmt.Fprintf(w, "{\n") + first := true + expvar.Do(func(kv expvar.KeyValue) { + if !first { + fmt.Fprintf(w, ",\n") + } + first = false + fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value) + }) + fmt.Fprintf(w, "\n}\n") +} + +func allowMethod(w http.ResponseWriter, r *http.Request, m string) bool { + if m == r.Method { + return true + } + w.Header().Set("Allow", m) + http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed) + return false +} + +// WriteError logs and writes the given Error to the ResponseWriter +// If Error is an etcdErr, it is rendered to the ResponseWriter +// Otherwise, it is assumed to be a StatusInternalServerError +func WriteError(w http.ResponseWriter, r *http.Request, err error) { + if err == nil { + return + } + switch e := err.(type) { + case *etcdErr.Error: + e.WriteTo(w) + case *httptypes.HTTPError: + if et := e.WriteTo(w); et != nil { + plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr) + } + default: + switch err { + case etcdserver.ErrTimeoutDueToLeaderFail, etcdserver.ErrTimeoutDueToConnectionLost, 
etcdserver.ErrNotEnoughStartedMembers, etcdserver.ErrUnhealthy: + mlog.MergeError(err) + default: + mlog.MergeErrorf("got unexpected response error (%v)", err) + } + herr := httptypes.NewHTTPError(http.StatusInternalServerError, "Internal Server Error") + if et := herr.WriteTo(w); et != nil { + plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr) + } + } +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/peer.go b/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/peer.go similarity index 97% rename from vendor/github.com/coreos/etcd/etcdserver/api/v2http/peer.go rename to vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/peer.go index a1abadba8e72..721bae3c6000 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/peer.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/peer.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package v2http +package etcdhttp import ( "encoding/json" @@ -61,7 +61,7 @@ type peerMembersHandler struct { } func (h *peerMembersHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - if !allowMethod(w, r.Method, "GET") { + if !allowMethod(w, r, "GET") { return } w.Header().Set("X-Etcd-Cluster-ID", h.cluster.ID().String()) diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/client.go b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/client.go index 038f5417e67e..aa1e71ec3291 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/client.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/client.go @@ -17,7 +17,6 @@ package v2http import ( "encoding/json" "errors" - "expvar" "fmt" "io/ioutil" "net/http" @@ -30,38 +29,36 @@ import ( etcdErr "github.com/coreos/etcd/error" "github.com/coreos/etcd/etcdserver" "github.com/coreos/etcd/etcdserver/api" + "github.com/coreos/etcd/etcdserver/api/etcdhttp" "github.com/coreos/etcd/etcdserver/api/v2http/httptypes" "github.com/coreos/etcd/etcdserver/auth" "github.com/coreos/etcd/etcdserver/etcdserverpb" "github.com/coreos/etcd/etcdserver/membership" "github.com/coreos/etcd/etcdserver/stats" "github.com/coreos/etcd/pkg/types" - "github.com/coreos/etcd/raft" "github.com/coreos/etcd/store" - "github.com/coreos/etcd/version" - "github.com/coreos/pkg/capnslog" "github.com/jonboulle/clockwork" - "github.com/prometheus/client_golang/prometheus" "golang.org/x/net/context" ) const ( - authPrefix = "/v2/auth" - keysPrefix = "/v2/keys" - deprecatedMachinesPrefix = "/v2/machines" - membersPrefix = "/v2/members" - statsPrefix = "/v2/stats" - varsPath = "/debug/vars" - metricsPath = "/metrics" - healthPath = "/health" - versionPath = "/version" - configPath = "/config" + authPrefix = "/v2/auth" + keysPrefix = "/v2/keys" + machinesPrefix = "/v2/machines" + membersPrefix = "/v2/members" + statsPrefix = "/v2/stats" ) // NewClientHandler generates a muxed http.Handler with the given parameters to serve etcd client requests. 
func NewClientHandler(server *etcdserver.EtcdServer, timeout time.Duration) http.Handler { - sec := auth.NewStore(server, timeout) + mux := http.NewServeMux() + etcdhttp.HandleBasic(mux, server) + handleV2(mux, server, timeout) + return requestLogger(mux) +} +func handleV2(mux *http.ServeMux, server *etcdserver.EtcdServer, timeout time.Duration) { + sec := auth.NewStore(server, timeout) kh := &keysHandler{ sec: sec, server: server, @@ -84,34 +81,23 @@ func NewClientHandler(server *etcdserver.EtcdServer, timeout time.Duration) http clientCertAuthEnabled: server.Cfg.ClientCertAuthEnabled, } - dmh := &deprecatedMachinesHandler{ - cluster: server.Cluster(), - } + mah := &machinesHandler{cluster: server.Cluster()} sech := &authHandler{ sec: sec, cluster: server.Cluster(), clientCertAuthEnabled: server.Cfg.ClientCertAuthEnabled, } - - mux := http.NewServeMux() mux.HandleFunc("/", http.NotFound) - mux.Handle(healthPath, healthHandler(server)) - mux.HandleFunc(versionPath, versionHandler(server.Cluster(), serveVersion)) mux.Handle(keysPrefix, kh) mux.Handle(keysPrefix+"/", kh) mux.HandleFunc(statsPrefix+"/store", sh.serveStore) mux.HandleFunc(statsPrefix+"/self", sh.serveSelf) mux.HandleFunc(statsPrefix+"/leader", sh.serveLeader) - mux.HandleFunc(varsPath, serveVars) - mux.HandleFunc(configPath+"/local/log", logHandleFunc) - mux.Handle(metricsPath, prometheus.Handler()) mux.Handle(membersPrefix, mh) mux.Handle(membersPrefix+"/", mh) - mux.Handle(deprecatedMachinesPrefix, dmh) + mux.Handle(machinesPrefix, mah) handleAuth(mux, sech) - - return requestLogger(mux) } type keysHandler struct { @@ -170,11 +156,11 @@ func (h *keysHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } } -type deprecatedMachinesHandler struct { +type machinesHandler struct { cluster api.Cluster } -func (h *deprecatedMachinesHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { +func (h *machinesHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if !allowMethod(w, r.Method, "GET", "HEAD") { return } @@ -234,7 +220,7 @@ func (h *membersHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } now := h.clock.Now() m := membership.NewMember("", req.PeerURLs, "", &now) - err := h.server.AddMember(ctx, *m) + _, err := h.server.AddMember(ctx, *m) switch { case err == membership.ErrIDExists || err == membership.ErrPeerURLexists: writeError(w, r, httptypes.NewHTTPError(http.StatusConflict, err.Error())) @@ -255,7 +241,7 @@ func (h *membersHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if !ok { return } - err := h.server.RemoveMember(ctx, uint64(id)) + _, err := h.server.RemoveMember(ctx, uint64(id)) switch { case err == membership.ErrIDRemoved: writeError(w, r, httptypes.NewHTTPError(http.StatusGone, fmt.Sprintf("Member permanently removed: %s", id))) @@ -280,7 +266,7 @@ func (h *membersHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { ID: id, RaftAttributes: membership.RaftAttributes{PeerURLs: req.PeerURLs.StringSlice()}, } - err := h.server.UpdateMember(ctx, m) + _, err := h.server.UpdateMember(ctx, m) switch { case err == membership.ErrPeerURLexists: writeError(w, r, httptypes.NewHTTPError(http.StatusConflict, err.Error())) @@ -321,103 +307,13 @@ func (h *statsHandler) serveLeader(w http.ResponseWriter, r *http.Request) { } stats := h.stats.LeaderStats() if stats == nil { - writeError(w, r, httptypes.NewHTTPError(http.StatusForbidden, "not current leader")) + etcdhttp.WriteError(w, r, httptypes.NewHTTPError(http.StatusForbidden, "not current leader")) return } 
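+	// LeaderStats returns already-marshaled JSON, so it is written through verbatim.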
w.Header().Set("Content-Type", "application/json") w.Write(stats) } -func serveVars(w http.ResponseWriter, r *http.Request) { - if !allowMethod(w, r.Method, "GET") { - return - } - - w.Header().Set("Content-Type", "application/json; charset=utf-8") - fmt.Fprintf(w, "{\n") - first := true - expvar.Do(func(kv expvar.KeyValue) { - if !first { - fmt.Fprintf(w, ",\n") - } - first = false - fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value) - }) - fmt.Fprintf(w, "\n}\n") -} - -func healthHandler(server *etcdserver.EtcdServer) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - if !allowMethod(w, r.Method, "GET") { - return - } - if uint64(server.Leader()) == raft.None { - http.Error(w, `{"health": "false"}`, http.StatusServiceUnavailable) - return - } - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - if _, err := server.Do(ctx, etcdserverpb.Request{Method: "QGET"}); err != nil { - http.Error(w, `{"health": "false"}`, http.StatusServiceUnavailable) - return - } - w.WriteHeader(http.StatusOK) - w.Write([]byte(`{"health": "true"}`)) - } -} - -func versionHandler(c api.Cluster, fn func(http.ResponseWriter, *http.Request, string)) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - v := c.Version() - if v != nil { - fn(w, r, v.String()) - } else { - fn(w, r, "not_decided") - } - } -} - -func serveVersion(w http.ResponseWriter, r *http.Request, clusterV string) { - if !allowMethod(w, r.Method, "GET") { - return - } - vs := version.Versions{ - Server: version.Version, - Cluster: clusterV, - } - - w.Header().Set("Content-Type", "application/json") - b, err := json.Marshal(&vs) - if err != nil { - plog.Panicf("cannot marshal versions to json (%v)", err) - } - w.Write(b) -} - -func logHandleFunc(w http.ResponseWriter, r *http.Request) { - if !allowMethod(w, r.Method, "PUT") { - return - } - - in := struct{ Level string }{} - - d := json.NewDecoder(r.Body) - if err := d.Decode(&in); err != nil { - writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid json body")) - return - } - - logl, err := capnslog.ParseLevel(strings.ToUpper(in.Level)) - if err != nil { - writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid log level "+in.Level)) - return - } - - plog.Noticef("globalLogLevel set to %q", logl.String()) - capnslog.SetGlobalLogLevel(logl) - w.WriteHeader(http.StatusNoContent) -} - // parseKeyRequest converts a received http.Request on keysPrefix to // a server Request, performing validation of supplied fields as appropriate. // If any validation fails, an empty Request and non-nil error is returned. 
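With the refactor above, the v2 client handler delegates /health, /version, /metrics and /debug/vars to etcdhttp.HandleBasic. A small sketch for probing those endpoints; it assumes a member serving client traffic on 127.0.0.1:2379:

package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
)

func main() {
	// Endpoints registered by etcdhttp.HandleBasic (see base.go above).
	for _, path := range []string{"/health", "/version", "/debug/vars"} {
		resp, err := http.Get("http://127.0.0.1:2379" + path)
		if err != nil {
			log.Fatalf("GET %s: %v", path, err)
		}
		body, err := ioutil.ReadAll(resp.Body)
		resp.Body.Close()
		if err != nil {
			log.Fatalf("read %s: %v", path, err)
		}
		fmt.Printf("%s (%d): %s\n", path, resp.StatusCode, body)
	}
}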
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/http.go b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/http.go index 62c99e19d4a7..589c172dbbb4 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/http.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/http.go @@ -20,12 +20,11 @@ import ( "strings" "time" - etcdErr "github.com/coreos/etcd/error" - "github.com/coreos/etcd/etcdserver" + "github.com/coreos/etcd/etcdserver/api/etcdhttp" "github.com/coreos/etcd/etcdserver/api/v2http/httptypes" - "github.com/coreos/etcd/etcdserver/auth" "github.com/coreos/etcd/pkg/logutil" + "github.com/coreos/pkg/capnslog" ) @@ -39,37 +38,18 @@ var ( mlog = logutil.NewMergeLogger(plog) ) -// writeError logs and writes the given Error to the ResponseWriter -// If Error is an etcdErr, it is rendered to the ResponseWriter -// Otherwise, it is assumed to be a StatusInternalServerError func writeError(w http.ResponseWriter, r *http.Request, err error) { if err == nil { return } - switch e := err.(type) { - case *etcdErr.Error: - e.WriteTo(w) - case *httptypes.HTTPError: - if et := e.WriteTo(w); et != nil { - plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr) - } - case auth.Error: + if e, ok := err.(auth.Error); ok { herr := httptypes.NewHTTPError(e.HTTPStatus(), e.Error()) if et := herr.WriteTo(w); et != nil { plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr) } - default: - switch err { - case etcdserver.ErrTimeoutDueToLeaderFail, etcdserver.ErrTimeoutDueToConnectionLost, etcdserver.ErrNotEnoughStartedMembers, etcdserver.ErrUnhealthy: - mlog.MergeError(err) - default: - mlog.MergeErrorf("got unexpected response error (%v)", err) - } - herr := httptypes.NewHTTPError(http.StatusInternalServerError, "Internal Server Error") - if et := herr.WriteTo(w); et != nil { - plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr) - } + return } + etcdhttp.WriteError(w, r, err) } // allowMethod verifies that the given method is one of the allowed methods, diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3client/doc.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3client/doc.go new file mode 100644 index 000000000000..310715f5cd74 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3client/doc.go @@ -0,0 +1,45 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package v3client provides clientv3 interfaces from an etcdserver. +// +// Use v3client by creating an EtcdServer instance, then wrapping it with v3client.New: +// +// import ( +// "context" +// +// "github.com/coreos/etcd/embed" +// "github.com/coreos/etcd/etcdserver/api/v3client" +// ) +// +// ... +// +// // create an embedded EtcdServer from the default configuration +// cfg := embed.NewConfig() +// cfg.Dir = "default.etcd" +// e, err := embed.StartEtcd(cfg) +// if err != nil { +// // handle error! 
+// } +// +// // wrap the EtcdServer with v3client +// cli := v3client.New(e.Server) +// +// // use like an ordinary clientv3 +// resp, err := cli.Put(context.TODO(), "some-key", "it works!") +// if err != nil { +// // handle error! +// } +// +package v3client diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3client/v3client.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3client/v3client.go new file mode 100644 index 000000000000..c0c07c8d767d --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3client/v3client.go @@ -0,0 +1,67 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v3client + +import ( + "time" + + "github.com/coreos/etcd/clientv3" + "github.com/coreos/etcd/etcdserver" + "github.com/coreos/etcd/etcdserver/api/v3rpc" + "github.com/coreos/etcd/proxy/grpcproxy/adapter" + + "golang.org/x/net/context" +) + +// New creates a clientv3 client that wraps an in-process EtcdServer. Instead +// of making gRPC calls through sockets, the client makes direct function calls +// to the etcd server through its api/v3rpc function interfaces. +func New(s *etcdserver.EtcdServer) *clientv3.Client { + c := clientv3.NewCtxClient(context.Background()) + + kvc := adapter.KvServerToKvClient(v3rpc.NewQuotaKVServer(s)) + c.KV = clientv3.NewKVFromKVClient(kvc, c) + + lc := adapter.LeaseServerToLeaseClient(v3rpc.NewQuotaLeaseServer(s)) + c.Lease = clientv3.NewLeaseFromLeaseClient(lc, c, time.Second) + + wc := adapter.WatchServerToWatchClient(v3rpc.NewWatchServer(s)) + c.Watcher = &watchWrapper{clientv3.NewWatchFromWatchClient(wc, c)} + + mc := adapter.MaintenanceServerToMaintenanceClient(v3rpc.NewMaintenanceServer(s)) + c.Maintenance = clientv3.NewMaintenanceFromMaintenanceClient(mc, c) + + clc := adapter.ClusterServerToClusterClient(v3rpc.NewClusterServer(s)) + c.Cluster = clientv3.NewClusterFromClusterClient(clc, c) + + // TODO: implement clientv3.Auth interface? + + return c +} + +// BlankContext implements Stringer on a context so the ctx string doesn't +// depend on the context's WithValue data, which tends to be unsynchronized +// (e.g., x/net/trace), causing ctx.String() to throw data races. +type blankContext struct{ context.Context } + +func (*blankContext) String() string { return "(blankCtx)" } + +// watchWrapper wraps clientv3 watch calls to blank out the context +// to avoid races on trace data. +type watchWrapper struct{ clientv3.Watcher } + +func (ww *watchWrapper) Watch(ctx context.Context, key string, opts ...clientv3.OpOption) clientv3.WatchChan { + return ww.Watcher.Watch(&blankContext{ctx}, key, opts...) 
+} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/doc.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/doc.go new file mode 100644 index 000000000000..d6fefd741500 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/doc.go @@ -0,0 +1,16 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package v3election provides a v3 election service from an etcdserver. +package v3election diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/election.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/election.go new file mode 100644 index 000000000000..f9061c079268 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/election.go @@ -0,0 +1,123 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v3election + +import ( + "golang.org/x/net/context" + + "github.com/coreos/etcd/clientv3" + "github.com/coreos/etcd/clientv3/concurrency" + epb "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb" +) + +type electionServer struct { + c *clientv3.Client +} + +func NewElectionServer(c *clientv3.Client) epb.ElectionServer { + return &electionServer{c} +} + +func (es *electionServer) Campaign(ctx context.Context, req *epb.CampaignRequest) (*epb.CampaignResponse, error) { + s, err := es.session(ctx, req.Lease) + if err != nil { + return nil, err + } + e := concurrency.NewElection(s, string(req.Name)) + if err = e.Campaign(ctx, string(req.Value)); err != nil { + return nil, err + } + return &epb.CampaignResponse{ + Header: e.Header(), + Leader: &epb.LeaderKey{ + Name: req.Name, + Key: []byte(e.Key()), + Rev: e.Rev(), + Lease: int64(s.Lease()), + }, + }, nil +} + +func (es *electionServer) Proclaim(ctx context.Context, req *epb.ProclaimRequest) (*epb.ProclaimResponse, error) { + s, err := es.session(ctx, req.Leader.Lease) + if err != nil { + return nil, err + } + e := concurrency.ResumeElection(s, string(req.Leader.Name), string(req.Leader.Key), req.Leader.Rev) + if err := e.Proclaim(ctx, string(req.Value)); err != nil { + return nil, err + } + return &epb.ProclaimResponse{Header: e.Header()}, nil +} + +func (es *electionServer) Observe(req *epb.LeaderRequest, stream epb.Election_ObserveServer) error { + s, err := es.session(stream.Context(), -1) + if err != nil { + return err + } + e := concurrency.NewElection(s, string(req.Name)) + ch := e.Observe(stream.Context()) + for stream.Context().Err() == nil { + select { + case <-stream.Context().Done(): + case resp, ok := <-ch: + if !ok { + return nil + } + lresp := &epb.LeaderResponse{Header: resp.Header, Kv: resp.Kvs[0]} + if err := stream.Send(lresp); err != nil { + return err + } + } + } + return stream.Context().Err() +} + +func (es *electionServer) Leader(ctx context.Context, req *epb.LeaderRequest) (*epb.LeaderResponse, error) { + s, err := es.session(ctx, -1) + if err != nil { + return nil, err + } + l, lerr := concurrency.NewElection(s, string(req.Name)).Leader(ctx) + if lerr != nil { + return nil, lerr + } + return &epb.LeaderResponse{Header: l.Header, Kv: l.Kvs[0]}, nil +} + +func (es *electionServer) Resign(ctx context.Context, req *epb.ResignRequest) (*epb.ResignResponse, error) { + s, err := es.session(ctx, req.Leader.Lease) + if err != nil { + return nil, err + } + e := concurrency.ResumeElection(s, string(req.Leader.Name), string(req.Leader.Key), req.Leader.Rev) + if err := e.Resign(ctx); err != nil { + return nil, err + } + return &epb.ResignResponse{Header: e.Header()}, nil +} + +func (es *electionServer) session(ctx context.Context, lease int64) (*concurrency.Session, error) { + s, err := concurrency.NewSession( + es.c, + concurrency.WithLease(clientv3.LeaseID(lease)), + concurrency.WithContext(ctx), + ) + if err != nil { + return nil, err + } + s.Orphan() + return s, nil +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go new file mode 100644 index 000000000000..ac00cbea9837 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go @@ -0,0 +1,313 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. 
+// source: etcdserver/api/v3election/v3electionpb/v3election.proto + +/* +Package v3electionpb is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. +*/ +package gw + +import ( + "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb" + "io" + "net/http" + + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray + +func request_Election_Campaign_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq v3electionpb.CampaignRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Campaign(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Election_Proclaim_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq v3electionpb.ProclaimRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Proclaim(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Election_Leader_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq v3electionpb.LeaderRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Leader(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Election_Observe_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (v3electionpb.Election_ObserveClient, runtime.ServerMetadata, error) { + var protoReq v3electionpb.LeaderRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + stream, err := client.Observe(ctx, &protoReq) + if err != nil { + return nil, metadata, err + } + header, err := stream.Header() + if err != nil { + return nil, metadata, err + } + metadata.HeaderMD = header + return stream, metadata, nil + +} + +func request_Election_Resign_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq v3electionpb.ResignRequest + var metadata runtime.ServerMetadata + + if err := 
marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Resign(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +// RegisterElectionHandlerFromEndpoint is same as RegisterElectionHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterElectionHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterElectionHandler(ctx, mux, conn) +} + +// RegisterElectionHandler registers the http handlers for service Election to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterElectionHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterElectionHandlerClient(ctx, mux, v3electionpb.NewElectionClient(conn)) +} + +// RegisterElectionHandler registers the http handlers for service Election to "mux". +// The handlers forward requests to the grpc endpoint over the given implementation of "ElectionClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "ElectionClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "ElectionClient" to call the correct interceptors. +func RegisterElectionHandlerClient(ctx context.Context, mux *runtime.ServeMux, client v3electionpb.ElectionClient) error { + + mux.Handle("POST", pattern_Election_Campaign_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Election_Campaign_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Election_Campaign_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("POST", pattern_Election_Proclaim_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Election_Proclaim_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Election_Proclaim_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_Election_Leader_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Election_Leader_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Election_Leader_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_Election_Observe_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Election_Observe_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Election_Observe_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("POST", pattern_Election_Resign_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Election_Resign_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Election_Resign_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +var ( + pattern_Election_Campaign_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "election", "campaign"}, "")) + + pattern_Election_Proclaim_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "election", "proclaim"}, "")) + + pattern_Election_Leader_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "election", "leader"}, "")) + + pattern_Election_Observe_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "election", "observe"}, "")) + + pattern_Election_Resign_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "election", "resign"}, "")) +) + +var ( + forward_Election_Campaign_0 = runtime.ForwardResponseMessage + + forward_Election_Proclaim_0 = runtime.ForwardResponseMessage + + forward_Election_Leader_0 = runtime.ForwardResponseMessage + + forward_Election_Observe_0 = runtime.ForwardResponseStream + + forward_Election_Resign_0 = runtime.ForwardResponseMessage +) diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.pb.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.pb.go new file mode 100644 index 000000000000..92acb1469e95 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.pb.go @@ -0,0 +1,2098 @@ +// Code generated by protoc-gen-gogo. +// source: v3election.proto +// DO NOT EDIT! + +/* + Package v3electionpb is a generated protocol buffer package. + + It is generated from these files: + v3election.proto + + It has these top-level messages: + CampaignRequest + CampaignResponse + LeaderKey + LeaderRequest + LeaderResponse + ResignRequest + ResignResponse + ProclaimRequest + ProclaimResponse +*/ +package v3electionpb + +import ( + "fmt" + + proto "github.com/golang/protobuf/proto" + + math "math" + + etcdserverpb "github.com/coreos/etcd/etcdserver/etcdserverpb" + + mvccpb "github.com/coreos/etcd/mvcc/mvccpb" + + _ "google.golang.org/genproto/googleapis/api/annotations" + + context "golang.org/x/net/context" + + grpc "google.golang.org/grpc" + + io "io" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type CampaignRequest struct {
+	// name is the election's identifier for the campaign.
+	Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// lease is the ID of the lease attached to leadership of the election. If the
+	// lease expires or is revoked before resigning leadership, then the
+	// leadership is transferred to the next campaigner, if any.
+	Lease int64 `protobuf:"varint,2,opt,name=lease,proto3" json:"lease,omitempty"`
+	// value is the initial proclaimed value set when the campaigner wins the
+	// election.
+	Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *CampaignRequest) Reset() { *m = CampaignRequest{} }
+func (m *CampaignRequest) String() string { return proto.CompactTextString(m) }
+func (*CampaignRequest) ProtoMessage() {}
+func (*CampaignRequest) Descriptor() ([]byte, []int) { return fileDescriptorV3Election, []int{0} }
+
+func (m *CampaignRequest) GetName() []byte {
+	if m != nil {
+		return m.Name
+	}
+	return nil
+}
+
+func (m *CampaignRequest) GetLease() int64 {
+	if m != nil {
+		return m.Lease
+	}
+	return 0
+}
+
+func (m *CampaignRequest) GetValue() []byte {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+type CampaignResponse struct {
+	Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"`
+	// leader describes the resources used for holding leadership of the election.
+	Leader *LeaderKey `protobuf:"bytes,2,opt,name=leader" json:"leader,omitempty"`
+}
+
+func (m *CampaignResponse) Reset() { *m = CampaignResponse{} }
+func (m *CampaignResponse) String() string { return proto.CompactTextString(m) }
+func (*CampaignResponse) ProtoMessage() {}
+func (*CampaignResponse) Descriptor() ([]byte, []int) { return fileDescriptorV3Election, []int{1} }
+
+func (m *CampaignResponse) GetHeader() *etcdserverpb.ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *CampaignResponse) GetLeader() *LeaderKey {
+	if m != nil {
+		return m.Leader
+	}
+	return nil
+}
+
+type LeaderKey struct {
+	// name is the election identifier that corresponds to the leadership key.
+	Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// key is an opaque key representing the ownership of the election. If the key
+	// is deleted, then leadership is lost.
+	Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
+	// rev is the creation revision of the key. It can be used to test for ownership
+	// of an election during transactions by testing the key's creation revision
+	// matches rev.
+	Rev int64 `protobuf:"varint,3,opt,name=rev,proto3" json:"rev,omitempty"`
+	// lease is the lease ID of the election leader.
+ Lease int64 `protobuf:"varint,4,opt,name=lease,proto3" json:"lease,omitempty"` +} + +func (m *LeaderKey) Reset() { *m = LeaderKey{} } +func (m *LeaderKey) String() string { return proto.CompactTextString(m) } +func (*LeaderKey) ProtoMessage() {} +func (*LeaderKey) Descriptor() ([]byte, []int) { return fileDescriptorV3Election, []int{2} } + +func (m *LeaderKey) GetName() []byte { + if m != nil { + return m.Name + } + return nil +} + +func (m *LeaderKey) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *LeaderKey) GetRev() int64 { + if m != nil { + return m.Rev + } + return 0 +} + +func (m *LeaderKey) GetLease() int64 { + if m != nil { + return m.Lease + } + return 0 +} + +type LeaderRequest struct { + // name is the election identifier for the leadership information. + Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (m *LeaderRequest) Reset() { *m = LeaderRequest{} } +func (m *LeaderRequest) String() string { return proto.CompactTextString(m) } +func (*LeaderRequest) ProtoMessage() {} +func (*LeaderRequest) Descriptor() ([]byte, []int) { return fileDescriptorV3Election, []int{3} } + +func (m *LeaderRequest) GetName() []byte { + if m != nil { + return m.Name + } + return nil +} + +type LeaderResponse struct { + Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // kv is the key-value pair representing the latest leader update. + Kv *mvccpb.KeyValue `protobuf:"bytes,2,opt,name=kv" json:"kv,omitempty"` +} + +func (m *LeaderResponse) Reset() { *m = LeaderResponse{} } +func (m *LeaderResponse) String() string { return proto.CompactTextString(m) } +func (*LeaderResponse) ProtoMessage() {} +func (*LeaderResponse) Descriptor() ([]byte, []int) { return fileDescriptorV3Election, []int{4} } + +func (m *LeaderResponse) GetHeader() *etcdserverpb.ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *LeaderResponse) GetKv() *mvccpb.KeyValue { + if m != nil { + return m.Kv + } + return nil +} + +type ResignRequest struct { + // leader is the leadership to relinquish by resignation. + Leader *LeaderKey `protobuf:"bytes,1,opt,name=leader" json:"leader,omitempty"` +} + +func (m *ResignRequest) Reset() { *m = ResignRequest{} } +func (m *ResignRequest) String() string { return proto.CompactTextString(m) } +func (*ResignRequest) ProtoMessage() {} +func (*ResignRequest) Descriptor() ([]byte, []int) { return fileDescriptorV3Election, []int{5} } + +func (m *ResignRequest) GetLeader() *LeaderKey { + if m != nil { + return m.Leader + } + return nil +} + +type ResignResponse struct { + Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` +} + +func (m *ResignResponse) Reset() { *m = ResignResponse{} } +func (m *ResignResponse) String() string { return proto.CompactTextString(m) } +func (*ResignResponse) ProtoMessage() {} +func (*ResignResponse) Descriptor() ([]byte, []int) { return fileDescriptorV3Election, []int{6} } + +func (m *ResignResponse) GetHeader() *etcdserverpb.ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +type ProclaimRequest struct { + // leader is the leadership hold on the election. + Leader *LeaderKey `protobuf:"bytes,1,opt,name=leader" json:"leader,omitempty"` + // value is an update meant to overwrite the leader's current value. 
+ Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *ProclaimRequest) Reset() { *m = ProclaimRequest{} } +func (m *ProclaimRequest) String() string { return proto.CompactTextString(m) } +func (*ProclaimRequest) ProtoMessage() {} +func (*ProclaimRequest) Descriptor() ([]byte, []int) { return fileDescriptorV3Election, []int{7} } + +func (m *ProclaimRequest) GetLeader() *LeaderKey { + if m != nil { + return m.Leader + } + return nil +} + +func (m *ProclaimRequest) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +type ProclaimResponse struct { + Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` +} + +func (m *ProclaimResponse) Reset() { *m = ProclaimResponse{} } +func (m *ProclaimResponse) String() string { return proto.CompactTextString(m) } +func (*ProclaimResponse) ProtoMessage() {} +func (*ProclaimResponse) Descriptor() ([]byte, []int) { return fileDescriptorV3Election, []int{8} } + +func (m *ProclaimResponse) GetHeader() *etcdserverpb.ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func init() { + proto.RegisterType((*CampaignRequest)(nil), "v3electionpb.CampaignRequest") + proto.RegisterType((*CampaignResponse)(nil), "v3electionpb.CampaignResponse") + proto.RegisterType((*LeaderKey)(nil), "v3electionpb.LeaderKey") + proto.RegisterType((*LeaderRequest)(nil), "v3electionpb.LeaderRequest") + proto.RegisterType((*LeaderResponse)(nil), "v3electionpb.LeaderResponse") + proto.RegisterType((*ResignRequest)(nil), "v3electionpb.ResignRequest") + proto.RegisterType((*ResignResponse)(nil), "v3electionpb.ResignResponse") + proto.RegisterType((*ProclaimRequest)(nil), "v3electionpb.ProclaimRequest") + proto.RegisterType((*ProclaimResponse)(nil), "v3electionpb.ProclaimResponse") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for Election service + +type ElectionClient interface { + // Campaign waits to acquire leadership in an election, returning a LeaderKey + // representing the leadership if successful. The LeaderKey can then be used + // to issue new values on the election, transactionally guard API requests on + // leadership still being held, and resign from the election. + Campaign(ctx context.Context, in *CampaignRequest, opts ...grpc.CallOption) (*CampaignResponse, error) + // Proclaim updates the leader's posted value with a new value. + Proclaim(ctx context.Context, in *ProclaimRequest, opts ...grpc.CallOption) (*ProclaimResponse, error) + // Leader returns the current election proclamation, if any. + Leader(ctx context.Context, in *LeaderRequest, opts ...grpc.CallOption) (*LeaderResponse, error) + // Observe streams election proclamations in-order as made by the election's + // elected leaders. + Observe(ctx context.Context, in *LeaderRequest, opts ...grpc.CallOption) (Election_ObserveClient, error) + // Resign releases election leadership so other campaigners may acquire + // leadership on the election. 
+ Resign(ctx context.Context, in *ResignRequest, opts ...grpc.CallOption) (*ResignResponse, error) +} + +type electionClient struct { + cc *grpc.ClientConn +} + +func NewElectionClient(cc *grpc.ClientConn) ElectionClient { + return &electionClient{cc} +} + +func (c *electionClient) Campaign(ctx context.Context, in *CampaignRequest, opts ...grpc.CallOption) (*CampaignResponse, error) { + out := new(CampaignResponse) + err := grpc.Invoke(ctx, "/v3electionpb.Election/Campaign", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *electionClient) Proclaim(ctx context.Context, in *ProclaimRequest, opts ...grpc.CallOption) (*ProclaimResponse, error) { + out := new(ProclaimResponse) + err := grpc.Invoke(ctx, "/v3electionpb.Election/Proclaim", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *electionClient) Leader(ctx context.Context, in *LeaderRequest, opts ...grpc.CallOption) (*LeaderResponse, error) { + out := new(LeaderResponse) + err := grpc.Invoke(ctx, "/v3electionpb.Election/Leader", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *electionClient) Observe(ctx context.Context, in *LeaderRequest, opts ...grpc.CallOption) (Election_ObserveClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Election_serviceDesc.Streams[0], c.cc, "/v3electionpb.Election/Observe", opts...) + if err != nil { + return nil, err + } + x := &electionObserveClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Election_ObserveClient interface { + Recv() (*LeaderResponse, error) + grpc.ClientStream +} + +type electionObserveClient struct { + grpc.ClientStream +} + +func (x *electionObserveClient) Recv() (*LeaderResponse, error) { + m := new(LeaderResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *electionClient) Resign(ctx context.Context, in *ResignRequest, opts ...grpc.CallOption) (*ResignResponse, error) { + out := new(ResignResponse) + err := grpc.Invoke(ctx, "/v3electionpb.Election/Resign", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Election service + +type ElectionServer interface { + // Campaign waits to acquire leadership in an election, returning a LeaderKey + // representing the leadership if successful. The LeaderKey can then be used + // to issue new values on the election, transactionally guard API requests on + // leadership still being held, and resign from the election. + Campaign(context.Context, *CampaignRequest) (*CampaignResponse, error) + // Proclaim updates the leader's posted value with a new value. + Proclaim(context.Context, *ProclaimRequest) (*ProclaimResponse, error) + // Leader returns the current election proclamation, if any. + Leader(context.Context, *LeaderRequest) (*LeaderResponse, error) + // Observe streams election proclamations in-order as made by the election's + // elected leaders. + Observe(*LeaderRequest, Election_ObserveServer) error + // Resign releases election leadership so other campaigners may acquire + // leadership on the election. 
+ Resign(context.Context, *ResignRequest) (*ResignResponse, error) +} + +func RegisterElectionServer(s *grpc.Server, srv ElectionServer) { + s.RegisterService(&_Election_serviceDesc, srv) +} + +func _Election_Campaign_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CampaignRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ElectionServer).Campaign(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/v3electionpb.Election/Campaign", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ElectionServer).Campaign(ctx, req.(*CampaignRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Election_Proclaim_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ProclaimRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ElectionServer).Proclaim(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/v3electionpb.Election/Proclaim", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ElectionServer).Proclaim(ctx, req.(*ProclaimRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Election_Leader_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LeaderRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ElectionServer).Leader(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/v3electionpb.Election/Leader", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ElectionServer).Leader(ctx, req.(*LeaderRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Election_Observe_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(LeaderRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(ElectionServer).Observe(m, &electionObserveServer{stream}) +} + +type Election_ObserveServer interface { + Send(*LeaderResponse) error + grpc.ServerStream +} + +type electionObserveServer struct { + grpc.ServerStream +} + +func (x *electionObserveServer) Send(m *LeaderResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _Election_Resign_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ResignRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ElectionServer).Resign(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/v3electionpb.Election/Resign", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ElectionServer).Resign(ctx, req.(*ResignRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Election_serviceDesc = grpc.ServiceDesc{ + ServiceName: "v3electionpb.Election", + HandlerType: (*ElectionServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Campaign", + Handler: _Election_Campaign_Handler, + }, + { + MethodName: "Proclaim", + Handler: _Election_Proclaim_Handler, + }, + { + MethodName: "Leader", + Handler: 
_Election_Leader_Handler, + }, + { + MethodName: "Resign", + Handler: _Election_Resign_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Observe", + Handler: _Election_Observe_Handler, + ServerStreams: true, + }, + }, + Metadata: "v3election.proto", +} + +func (m *CampaignRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CampaignRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintV3Election(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if m.Lease != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintV3Election(dAtA, i, uint64(m.Lease)) + } + if len(m.Value) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintV3Election(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + } + return i, nil +} + +func (m *CampaignResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CampaignResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintV3Election(dAtA, i, uint64(m.Header.Size())) + n1, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if m.Leader != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintV3Election(dAtA, i, uint64(m.Leader.Size())) + n2, err := m.Leader.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + return i, nil +} + +func (m *LeaderKey) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaderKey) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintV3Election(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Key) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintV3Election(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if m.Rev != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintV3Election(dAtA, i, uint64(m.Rev)) + } + if m.Lease != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintV3Election(dAtA, i, uint64(m.Lease)) + } + return i, nil +} + +func (m *LeaderRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaderRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintV3Election(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + return i, nil +} + +func (m *LeaderResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaderResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintV3Election(dAtA, i, uint64(m.Header.Size())) + n3, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + if m.Kv != nil { + dAtA[i] = 0x12 + i++ 
+ i = encodeVarintV3Election(dAtA, i, uint64(m.Kv.Size())) + n4, err := m.Kv.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + return i, nil +} + +func (m *ResignRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResignRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Leader != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintV3Election(dAtA, i, uint64(m.Leader.Size())) + n5, err := m.Leader.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + } + return i, nil +} + +func (m *ResignResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResignResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintV3Election(dAtA, i, uint64(m.Header.Size())) + n6, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + return i, nil +} + +func (m *ProclaimRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProclaimRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Leader != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintV3Election(dAtA, i, uint64(m.Leader.Size())) + n7, err := m.Leader.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + if len(m.Value) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintV3Election(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + } + return i, nil +} + +func (m *ProclaimResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProclaimResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintV3Election(dAtA, i, uint64(m.Header.Size())) + n8, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + return i, nil +} + +func encodeFixed64V3Election(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32V3Election(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintV3Election(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *CampaignRequest) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovV3Election(uint64(l)) + } + if m.Lease != 0 { + n += 1 + sovV3Election(uint64(m.Lease)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovV3Election(uint64(l)) + } + return n +} + +func (m 
*CampaignResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovV3Election(uint64(l)) + } + if m.Leader != nil { + l = m.Leader.Size() + n += 1 + l + sovV3Election(uint64(l)) + } + return n +} + +func (m *LeaderKey) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovV3Election(uint64(l)) + } + l = len(m.Key) + if l > 0 { + n += 1 + l + sovV3Election(uint64(l)) + } + if m.Rev != 0 { + n += 1 + sovV3Election(uint64(m.Rev)) + } + if m.Lease != 0 { + n += 1 + sovV3Election(uint64(m.Lease)) + } + return n +} + +func (m *LeaderRequest) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovV3Election(uint64(l)) + } + return n +} + +func (m *LeaderResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovV3Election(uint64(l)) + } + if m.Kv != nil { + l = m.Kv.Size() + n += 1 + l + sovV3Election(uint64(l)) + } + return n +} + +func (m *ResignRequest) Size() (n int) { + var l int + _ = l + if m.Leader != nil { + l = m.Leader.Size() + n += 1 + l + sovV3Election(uint64(l)) + } + return n +} + +func (m *ResignResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovV3Election(uint64(l)) + } + return n +} + +func (m *ProclaimRequest) Size() (n int) { + var l int + _ = l + if m.Leader != nil { + l = m.Leader.Size() + n += 1 + l + sovV3Election(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovV3Election(uint64(l)) + } + return n +} + +func (m *ProclaimResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovV3Election(uint64(l)) + } + return n +} + +func sovV3Election(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozV3Election(x uint64) (n int) { + return sovV3Election(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *CampaignRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CampaignRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CampaignRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthV3Election + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...) 
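Editor's note: the generated `encodeVarintV3Election`/`sovV3Election` pair above implements protocol buffers' base-128 varint encoding: seven payload bits per byte, little-endian group order, with the high bit set on every byte except the last. A minimal standalone sketch of the same scheme (the helper names here are mine, not part of the generated file):

```go
package main

import "fmt"

// putUvarint mirrors the generated encodeVarint* helpers: emit 7 payload
// bits per byte, setting the MSB while more bytes follow.
func putUvarint(buf []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		buf[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	buf[offset] = uint8(v)
	return offset + 1
}

// uvarintSize mirrors sov*: the byte count putUvarint will write, which is
// how the generated Size() methods pre-compute the buffer length.
func uvarintSize(x uint64) (n int) {
	for {
		n++
		x >>= 7
		if x == 0 {
			return n
		}
	}
}

func main() {
	buf := make([]byte, uvarintSize(300))
	end := putUvarint(buf, 0, 300)
	fmt.Printf("% x\n", buf[:end]) // "ac 02": 300 = 0b10_0101100, low 7 bits first
}
```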
+ if m.Name == nil { + m.Name = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType) + } + m.Lease = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Lease |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthV3Election + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) + if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipV3Election(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthV3Election + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CampaignResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CampaignResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CampaignResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthV3Election + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &etcdserverpb.ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthV3Election + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Leader == nil { + m.Leader = &LeaderKey{} + } + if err := m.Leader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipV3Election(dAtA[iNdEx:]) + if err 
!= nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthV3Election + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaderKey) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaderKey: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaderKey: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthV3Election + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...) + if m.Name == nil { + m.Name = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthV3Election + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) 
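Each generated Unmarshal loop begins the same way: read one varint key, then split it into a field number (`wire >> 3`) and a wire type (`wire & 0x7`); length-delimited fields (wire type 2) are followed by a byte-length varint. As a concrete illustration, the `0x1a` key byte that `CampaignRequest.MarshalTo` writes before the Value field decodes like this (a toy sketch, not part of the generated code):

```go
package main

import "fmt"

func main() {
	// 0x1a = 0b00011_010 -> field number 3 (value), wire type 2 (length-delimited)
	tag := uint64(0x1a)
	fieldNum := int32(tag >> 3)
	wireType := int(tag & 0x7)
	fmt.Println(fieldNum, wireType) // prints: 3 2
}
```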
+ if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Rev", wireType) + } + m.Rev = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Rev |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType) + } + m.Lease = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Lease |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipV3Election(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthV3Election + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaderRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaderRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaderRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthV3Election + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Name == nil { + m.Name = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipV3Election(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthV3Election + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaderResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaderResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaderResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthV3Election + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &etcdserverpb.ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kv", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthV3Election + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Kv == nil { + m.Kv = &mvccpb.KeyValue{} + } + if err := m.Kv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipV3Election(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthV3Election + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResignRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResignRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResignRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Leader", 
wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthV3Election + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Leader == nil { + m.Leader = &LeaderKey{} + } + if err := m.Leader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipV3Election(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthV3Election + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResignResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResignResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResignResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthV3Election + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &etcdserverpb.ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipV3Election(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthV3Election + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProclaimRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProclaimRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProclaimRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthV3Election + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Leader == nil { + m.Leader = &LeaderKey{} + } + if err := m.Leader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthV3Election + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) + if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipV3Election(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthV3Election + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProclaimResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProclaimResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProclaimResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Election + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthV3Election + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &etcdserverpb.ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipV3Election(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthV3Election + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipV3Election(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowV3Election + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowV3Election + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowV3Election + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthV3Election + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowV3Election + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipV3Election(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthV3Election = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowV3Election = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("v3election.proto", fileDescriptorV3Election) } + +var fileDescriptorV3Election = []byte{ + // 540 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0xc1, 0x6e, 0xd3, 0x40, + 0x10, 0x65, 0x9d, 0x10, 0xca, 0x90, 0xb6, 0x96, 0x55, 0x89, 0x34, 0xa4, 0x26, 0xda, 0x02, 0xaa, + 0x72, 0xf0, 0xa2, 0x86, 0x53, 0x4e, 0x08, 0x04, 0xaa, 0x54, 0x24, 0xc0, 0x07, 0x04, 0xc7, 0x8d, + 0x3b, 0x4a, 0xa2, 0x38, 0xde, 0xc5, 0x4e, 0x2d, 0xe5, 0xca, 0x2f, 0x70, 0xe1, 0x33, 0xf8, 0x0c, + 0x8e, 0x48, 0xfc, 0x00, 0x0a, 0x7c, 0x08, 0xda, 0x5d, 0x1b, 0x3b, 0x6e, 0x88, 0x50, 0x73, 0xb1, + 0xc6, 0x33, 0xcf, 0xf3, 0xe6, 0xbd, 0x9d, 0x35, 0xd8, 0x69, 0x1f, 0x43, 0x0c, 0xe6, 0x13, 0x11, + 0x79, 0x32, 0x16, 0x73, 0xe1, 0x34, 0x8b, 0x8c, 0x1c, 0xb6, 0x0f, 0x46, 0x62, 0x24, 0x74, 0x81, + 0xa9, 0xc8, 0x60, 0xda, 0x8f, 0x70, 0x1e, 0x5c, 0x30, 0xf5, 0x48, 0x30, 0x4e, 0x31, 0x2e, 0x85, + 0x72, 0xc8, 0x62, 0x19, 0x64, 0xb8, 0x43, 0x8d, 0x9b, 0xa5, 0x41, 0xa0, 0x1f, 0x72, 0xc8, 0xa6, + 0x69, 0x56, 0xea, 0x8c, 0x84, 0x18, 0x85, 0xc8, 0xb8, 0x9c, 0x30, 0x1e, 0x45, 0x62, 0xce, 0x15, + 0x63, 0x62, 0xaa, 0xf4, 0x2d, 0xec, 0x3f, 0xe7, 0x33, 0xc9, 0x27, 0xa3, 0xc8, 0xc7, 0x8f, 0x97, + 0x98, 0xcc, 0x1d, 0x07, 0xea, 0x11, 0x9f, 0x61, 0x8b, 0x74, 0xc9, 0x49, 0xd3, 0xd7, 0xb1, 0x73, + 0x00, 0x37, 0x43, 0xe4, 0x09, 0xb6, 0xac, 0x2e, 0x39, 0xa9, 0xf9, 0xe6, 0x45, 0x65, 0x53, 0x1e, + 0x5e, 0x62, 0xab, 0xa6, 0xa1, 0xe6, 0x85, 0x2e, 0xc0, 0x2e, 0x5a, 0x26, 0x52, 0x44, 0x09, 0x3a, + 0x4f, 0xa0, 0x31, 0x46, 0x7e, 0x81, 0xb1, 0xee, 0x7a, 0xe7, 0xb4, 0xe3, 0x95, 0x85, 0x78, 0x39, + 0xee, 0x4c, 0x63, 0xfc, 0x0c, 0xeb, 0x30, 0x68, 0x84, 0xe6, 0x2b, 0x4b, 0x7f, 0x75, 0xd7, 0x2b, + 0x5b, 0xe6, 0xbd, 0xd2, 0xb5, 0x73, 0x5c, 0xf8, 0x19, 0x8c, 0x7e, 0x80, 0xdb, 0x7f, 0x93, 0x6b, + 0x75, 0xd8, 0x50, 0x9b, 0xe2, 0x42, 0xb7, 0x6b, 0xfa, 0x2a, 0x54, 0x99, 0x18, 0x53, 0xad, 0xa0, + 0xe6, 0xab, 0xb0, 0xd0, 0x5a, 0x2f, 0x69, 0xa5, 0xc7, 0xb0, 0x6b, 0x5a, 0x6f, 0xb0, 0x89, 0x8e, + 
0x61, 0x2f, 0x07, 0x6d, 0x25, 0xbc, 0x0b, 0xd6, 0x34, 0xcd, 0x44, 0xdb, 0x9e, 0x39, 0x51, 0xef, + 0x1c, 0x17, 0xef, 0x94, 0xc1, 0xbe, 0x35, 0x4d, 0xe9, 0x53, 0xd8, 0xf5, 0x31, 0x29, 0x9d, 0x5a, + 0xe1, 0x15, 0xf9, 0x3f, 0xaf, 0x5e, 0xc2, 0x5e, 0xde, 0x61, 0x9b, 0x59, 0xe9, 0x7b, 0xd8, 0x7f, + 0x13, 0x8b, 0x20, 0xe4, 0x93, 0xd9, 0x75, 0x67, 0x29, 0x16, 0xc9, 0x2a, 0x2f, 0xd2, 0x19, 0xd8, + 0x45, 0xe7, 0x6d, 0x66, 0x3c, 0xfd, 0x5a, 0x87, 0x9d, 0x17, 0xd9, 0x00, 0x8e, 0x84, 0x9d, 0x7c, + 0x3f, 0x9d, 0xa3, 0xd5, 0xc9, 0x2a, 0x57, 0xa1, 0xed, 0xfe, 0xab, 0x6c, 0x58, 0xe8, 0xc3, 0x4f, + 0x3f, 0x7e, 0x7f, 0xb6, 0xee, 0xd3, 0x36, 0x4b, 0xfb, 0x3c, 0x94, 0x63, 0xce, 0x72, 0x34, 0x0b, + 0x32, 0xec, 0x80, 0xf4, 0x14, 0x63, 0x2e, 0xa4, 0xca, 0x58, 0xb1, 0xae, 0xca, 0x58, 0xd5, 0xbf, + 0x89, 0x51, 0x66, 0x58, 0xc5, 0x38, 0x86, 0x86, 0x71, 0xd9, 0xb9, 0xb7, 0xce, 0xfb, 0x9c, 0xad, + 0xb3, 0xbe, 0x98, 0x71, 0x1d, 0x6b, 0xae, 0x23, 0xda, 0xba, 0xca, 0x65, 0xce, 0x4d, 0x31, 0x85, + 0x70, 0xeb, 0xf5, 0x50, 0xfb, 0xbf, 0x0d, 0xd5, 0x03, 0x4d, 0xe5, 0xd2, 0xc3, 0xab, 0x54, 0xc2, + 0x74, 0x1f, 0x90, 0xde, 0x63, 0xa2, 0x74, 0x99, 0xa5, 0xad, 0x92, 0xad, 0x5c, 0x86, 0x2a, 0xd9, + 0xea, 0x9e, 0x6f, 0xd2, 0x15, 0x6b, 0xe4, 0x80, 0xf4, 0x9e, 0xd9, 0xdf, 0x96, 0x2e, 0xf9, 0xbe, + 0x74, 0xc9, 0xcf, 0xa5, 0x4b, 0xbe, 0xfc, 0x72, 0x6f, 0x0c, 0x1b, 0xfa, 0x8f, 0xd9, 0xff, 0x13, + 0x00, 0x00, 0xff, 0xff, 0xfc, 0x4d, 0x5a, 0x40, 0xca, 0x05, 0x00, 0x00, +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.proto b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.proto new file mode 100644 index 000000000000..ebf6c88f7fa3 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.proto @@ -0,0 +1,119 @@ +syntax = "proto3"; +package v3electionpb; + +import "gogoproto/gogo.proto"; +import "etcd/etcdserver/etcdserverpb/rpc.proto"; +import "etcd/mvcc/mvccpb/kv.proto"; + +// for grpc-gateway +import "google/api/annotations.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.unmarshaler_all) = true; + +// The election service exposes client-side election facilities as a gRPC interface. +service Election { + // Campaign waits to acquire leadership in an election, returning a LeaderKey + // representing the leadership if successful. The LeaderKey can then be used + // to issue new values on the election, transactionally guard API requests on + // leadership still being held, and resign from the election. + rpc Campaign(CampaignRequest) returns (CampaignResponse) { + option (google.api.http) = { + post: "/v3alpha/election/campaign" + body: "*" + }; + } + // Proclaim updates the leader's posted value with a new value. + rpc Proclaim(ProclaimRequest) returns (ProclaimResponse) { + option (google.api.http) = { + post: "/v3alpha/election/proclaim" + body: "*" + }; + } + // Leader returns the current election proclamation, if any. + rpc Leader(LeaderRequest) returns (LeaderResponse) { + option (google.api.http) = { + post: "/v3alpha/election/leader" + body: "*" + }; + } + // Observe streams election proclamations in-order as made by the election's + // elected leaders. + rpc Observe(LeaderRequest) returns (stream LeaderResponse) { + option (google.api.http) = { + post: "/v3alpha/election/observe" + body: "*" + }; + } + // Resign releases election leadership so other campaigners may acquire + // leadership on the election. 
+ rpc Resign(ResignRequest) returns (ResignResponse) { + option (google.api.http) = { + post: "/v3alpha/election/resign" + body: "*" + }; + } +} + +message CampaignRequest { + // name is the election's identifier for the campaign. + bytes name = 1; + // lease is the ID of the lease attached to leadership of the election. If the + // lease expires or is revoked before resigning leadership, then the + // leadership is transferred to the next campaigner, if any. + int64 lease = 2; + // value is the initial proclaimed value set when the campaigner wins the + // election. + bytes value = 3; +} + +message CampaignResponse { + etcdserverpb.ResponseHeader header = 1; + // leader describes the resources used for holding leadership of the election. + LeaderKey leader = 2; +} + +message LeaderKey { + // name is the election identifier that corresponds to the leadership key. + bytes name = 1; + // key is an opaque key representing the ownership of the election. If the key + // is deleted, then leadership is lost. + bytes key = 2; + // rev is the creation revision of the key. It can be used to test for ownership + // of an election during transactions by testing the key's creation revision + // matches rev. + int64 rev = 3; + // lease is the lease ID of the election leader. + int64 lease = 4; +} + +message LeaderRequest { + // name is the election identifier for the leadership information. + bytes name = 1; +} + +message LeaderResponse { + etcdserverpb.ResponseHeader header = 1; + // kv is the key-value pair representing the latest leader update. + mvccpb.KeyValue kv = 2; +} + +message ResignRequest { + // leader is the leadership to relinquish by resignation. + LeaderKey leader = 1; +} + +message ResignResponse { + etcdserverpb.ResponseHeader header = 1; +} + +message ProclaimRequest { + // leader is the leadership hold on the election. + LeaderKey leader = 1; + // value is an update meant to overwrite the leader's current value. + bytes value = 2; +} + +message ProclaimResponse { + etcdserverpb.ResponseHeader header = 1; +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/doc.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/doc.go new file mode 100644 index 000000000000..e0a1008abc9e --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/doc.go @@ -0,0 +1,16 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package v3lock provides a v3 locking service from an etcdserver. +package v3lock diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/lock.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/lock.go new file mode 100644 index 000000000000..66465bf13f62 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/lock.go @@ -0,0 +1,56 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
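Editor's note: that completes the v3election.proto definition. The same campaign/proclaim/resign workflow these RPCs expose is available client-side through etcd's clientv3/concurrency package; a minimal sketch, assuming a local endpoint and an arbitrary election prefix (both placeholders):

```go
package main

import (
	"log"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/clientv3/concurrency"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// The session's lease plays the role of CampaignRequest.lease: if it
	// expires, leadership passes to the next campaigner.
	s, err := concurrency.NewSession(cli)
	if err != nil {
		log.Fatal(err)
	}
	defer s.Close()

	e := concurrency.NewElection(s, "/my-election") // prefix is illustrative
	ctx := context.TODO()

	// Campaign blocks until leadership is acquired (rpc Campaign).
	if err := e.Campaign(ctx, "v1"); err != nil {
		log.Fatal(err)
	}
	// Proclaim posts a new value without giving up leadership (rpc Proclaim).
	if err := e.Proclaim(ctx, "v2"); err != nil {
		log.Fatal(err)
	}
	// Resign hands leadership to the next campaigner (rpc Resign).
	if err := e.Resign(ctx); err != nil {
		log.Fatal(err)
	}
}
```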
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v3lock + +import ( + "golang.org/x/net/context" + + "github.com/coreos/etcd/clientv3" + "github.com/coreos/etcd/clientv3/concurrency" + "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb" +) + +type lockServer struct { + c *clientv3.Client +} + +func NewLockServer(c *clientv3.Client) v3lockpb.LockServer { + return &lockServer{c} +} + +func (ls *lockServer) Lock(ctx context.Context, req *v3lockpb.LockRequest) (*v3lockpb.LockResponse, error) { + s, err := concurrency.NewSession( + ls.c, + concurrency.WithLease(clientv3.LeaseID(req.Lease)), + concurrency.WithContext(ctx), + ) + if err != nil { + return nil, err + } + s.Orphan() + m := concurrency.NewMutex(s, string(req.Name)) + if err = m.Lock(ctx); err != nil { + return nil, err + } + return &v3lockpb.LockResponse{Header: m.Header(), Key: []byte(m.Key())}, nil +} + +func (ls *lockServer) Unlock(ctx context.Context, req *v3lockpb.UnlockRequest) (*v3lockpb.UnlockResponse, error) { + resp, err := ls.c.Delete(ctx, string(req.Key)) + if err != nil { + return nil, err + } + return &v3lockpb.UnlockResponse{Header: resp.Header}, nil +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go new file mode 100644 index 000000000000..5aef4756dfe7 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go @@ -0,0 +1,167 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: etcdserver/api/v3lock/v3lockpb/v3lock.proto + +/* +Package v3lockpb is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. 
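Editor's note on lock.go above: `lockServer.Lock` is a thin wrapper over `concurrency.NewSession` plus `concurrency.NewMutex`, so the server-side semantics match the client-side mutex exactly. The equivalent direct usage, as a sketch with a placeholder lock prefix:

```go
package main

import (
	"log"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/clientv3/concurrency"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	s, err := concurrency.NewSession(cli) // lease kept alive while the session lives
	if err != nil {
		log.Fatal(err)
	}
	defer s.Close()

	m := concurrency.NewMutex(s, "/my-lock") // same primitive lockServer.Lock wraps
	ctx := context.TODO()
	if err := m.Lock(ctx); err != nil { // blocks until the lock is acquired
		log.Fatal(err)
	}
	// ... critical section ...
	if err := m.Unlock(ctx); err != nil {
		log.Fatal(err)
	}
}
```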
+*/ +package gw + +import ( + "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb" + "io" + "net/http" + + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray + +func request_Lock_Lock_0(ctx context.Context, marshaler runtime.Marshaler, client v3lockpb.LockClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq v3lockpb.LockRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Lock(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Lock_Unlock_0(ctx context.Context, marshaler runtime.Marshaler, client v3lockpb.LockClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq v3lockpb.UnlockRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Unlock(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +// RegisterLockHandlerFromEndpoint is same as RegisterLockHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterLockHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterLockHandler(ctx, mux, conn) +} + +// RegisterLockHandler registers the http handlers for service Lock to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterLockHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterLockHandlerClient(ctx, mux, v3lockpb.NewLockClient(conn)) +} + +// RegisterLockHandler registers the http handlers for service Lock to "mux". +// The handlers forward requests to the grpc endpoint over the given implementation of "LockClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "LockClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "LockClient" to call the correct interceptors. 
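With these gateway handlers registered, the lock service is also reachable over plain HTTP: POST to the `/v3alpha/lock/lock` and `/v3alpha/lock/unlock` patterns defined below, with bytes fields base64-encoded in the JSON body. A sketch, assuming a gateway listening on 127.0.0.1:2379:

```go
package main

import (
	"bytes"
	"encoding/base64"
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
)

func main() {
	// bytes fields travel base64-encoded in the gateway's JSON encoding.
	name := base64.StdEncoding.EncodeToString([]byte("my-lock"))
	body := []byte(fmt.Sprintf(`{"name":%q}`, name))

	resp, err := http.Post("http://127.0.0.1:2379/v3alpha/lock/lock",
		"application/json", bytes.NewReader(body))
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	out, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(string(out)) // response carries the base64 "key" that unlock takes
}
```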
+func RegisterLockHandlerClient(ctx context.Context, mux *runtime.ServeMux, client v3lockpb.LockClient) error { + + mux.Handle("POST", pattern_Lock_Lock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Lock_Lock_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Lock_Lock_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_Lock_Unlock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Lock_Unlock_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Lock_Unlock_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +var ( + pattern_Lock_Lock_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 1}, []string{"v3alpha", "lock"}, "")) + + pattern_Lock_Unlock_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "lock", "unlock"}, "")) +) + +var ( + forward_Lock_Lock_0 = runtime.ForwardResponseMessage + + forward_Lock_Unlock_0 = runtime.ForwardResponseMessage +) diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.pb.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.pb.go new file mode 100644 index 000000000000..dcf2bad4019c --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.pb.go @@ -0,0 +1,978 @@ +// Code generated by protoc-gen-gogo. +// source: v3lock.proto +// DO NOT EDIT! + +/* + Package v3lockpb is a generated protocol buffer package. + + It is generated from these files: + v3lock.proto + + It has these top-level messages: + LockRequest + LockResponse + UnlockRequest + UnlockResponse +*/ +package v3lockpb + +import ( + "fmt" + + proto "github.com/golang/protobuf/proto" + + math "math" + + etcdserverpb "github.com/coreos/etcd/etcdserver/etcdserverpb" + + _ "google.golang.org/genproto/googleapis/api/annotations" + + context "golang.org/x/net/context" + + grpc "google.golang.org/grpc" + + io "io" +) + +// Reference imports to suppress errors if they are not otherwise used. 
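The generated v3lock.pb.go stub that follows is consumed like any other gRPC client. A sketch of the raw Lock/Unlock round trip (the address and lock name are illustrative; leaving Lease at zero lets the server create a lease on the caller's behalf):

```go
package main

import (
	"log"

	"github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

func main() {
	// assumes a local etcd serving the lock service on its gRPC port
	conn, err := grpc.Dial("127.0.0.1:2379", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	lc := v3lockpb.NewLockClient(conn)
	ctx := context.TODO()

	lresp, err := lc.Lock(ctx, &v3lockpb.LockRequest{Name: []byte("my-lock")})
	if err != nil {
		log.Fatal(err)
	}
	// The returned Key is the ownership handle; hand it back to Unlock.
	if _, err := lc.Unlock(ctx, &v3lockpb.UnlockRequest{Key: lresp.Key}); err != nil {
		log.Fatal(err)
	}
}
```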
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type LockRequest struct { + // name is the identifier for the distributed shared lock to be acquired. + Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // lease is the ID of the lease that will be attached to ownership of the + // lock. If the lease expires or is revoked while it holds the lock, + // the lock is automatically released. Calls to Lock with the same lease will + // be treated as a single acquisition; locking twice with the same lease is a + // no-op. + Lease int64 `protobuf:"varint,2,opt,name=lease,proto3" json:"lease,omitempty"` +} + +func (m *LockRequest) Reset() { *m = LockRequest{} } +func (m *LockRequest) String() string { return proto.CompactTextString(m) } +func (*LockRequest) ProtoMessage() {} +func (*LockRequest) Descriptor() ([]byte, []int) { return fileDescriptorV3Lock, []int{0} } + +func (m *LockRequest) GetName() []byte { + if m != nil { + return m.Name + } + return nil +} + +func (m *LockRequest) GetLease() int64 { + if m != nil { + return m.Lease + } + return 0 +} + +type LockResponse struct { + Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // key is a key that will exist on etcd for the duration that the Lock caller + // owns the lock. Users should not modify this key or the lock may exhibit + // undefined behavior. + Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` +} + +func (m *LockResponse) Reset() { *m = LockResponse{} } +func (m *LockResponse) String() string { return proto.CompactTextString(m) } +func (*LockResponse) ProtoMessage() {} +func (*LockResponse) Descriptor() ([]byte, []int) { return fileDescriptorV3Lock, []int{1} } + +func (m *LockResponse) GetHeader() *etcdserverpb.ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *LockResponse) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +type UnlockRequest struct { + // key is the lock ownership key granted by Lock.
+ Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` +} + +func (m *UnlockRequest) Reset() { *m = UnlockRequest{} } +func (m *UnlockRequest) String() string { return proto.CompactTextString(m) } +func (*UnlockRequest) ProtoMessage() {} +func (*UnlockRequest) Descriptor() ([]byte, []int) { return fileDescriptorV3Lock, []int{2} } + +func (m *UnlockRequest) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +type UnlockResponse struct { + Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` +} + +func (m *UnlockResponse) Reset() { *m = UnlockResponse{} } +func (m *UnlockResponse) String() string { return proto.CompactTextString(m) } +func (*UnlockResponse) ProtoMessage() {} +func (*UnlockResponse) Descriptor() ([]byte, []int) { return fileDescriptorV3Lock, []int{3} } + +func (m *UnlockResponse) GetHeader() *etcdserverpb.ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func init() { + proto.RegisterType((*LockRequest)(nil), "v3lockpb.LockRequest") + proto.RegisterType((*LockResponse)(nil), "v3lockpb.LockResponse") + proto.RegisterType((*UnlockRequest)(nil), "v3lockpb.UnlockRequest") + proto.RegisterType((*UnlockResponse)(nil), "v3lockpb.UnlockResponse") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for Lock service + +type LockClient interface { + // Lock acquires a distributed shared lock on a given named lock. + // On success, it will return a unique key that exists so long as the + // lock is held by the caller. This key can be used in conjunction with + // transactions to safely ensure updates to etcd only occur while holding + // lock ownership. The lock is held until Unlock is called on the key or the + // lease associated with the owner expires. + Lock(ctx context.Context, in *LockRequest, opts ...grpc.CallOption) (*LockResponse, error) + // Unlock takes a key returned by Lock and releases the hold on the lock. The + // next Lock caller waiting for the lock will then be woken up and given + // ownership of the lock. + Unlock(ctx context.Context, in *UnlockRequest, opts ...grpc.CallOption) (*UnlockResponse, error) +} + +type lockClient struct { + cc *grpc.ClientConn +} + +func NewLockClient(cc *grpc.ClientConn) LockClient { + return &lockClient{cc} +} + +func (c *lockClient) Lock(ctx context.Context, in *LockRequest, opts ...grpc.CallOption) (*LockResponse, error) { + out := new(LockResponse) + err := grpc.Invoke(ctx, "/v3lockpb.Lock/Lock", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *lockClient) Unlock(ctx context.Context, in *UnlockRequest, opts ...grpc.CallOption) (*UnlockResponse, error) { + out := new(UnlockResponse) + err := grpc.Invoke(ctx, "/v3lockpb.Lock/Unlock", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Lock service + +type LockServer interface { + // Lock acquires a distributed shared lock on a given named lock. + // On success, it will return a unique key that exists so long as the + // lock is held by the caller. This key can be used in conjunction with + // transactions to safely ensure updates to etcd only occur while holding + // lock ownership.
The lock is held until Unlock is called on the key or the + // lease associated with the owner expires. + Lock(context.Context, *LockRequest) (*LockResponse, error) + // Unlock takes a key returned by Lock and releases the hold on the lock. The + // next Lock caller waiting for the lock will then be woken up and given + // ownership of the lock. + Unlock(context.Context, *UnlockRequest) (*UnlockResponse, error) +} + +func RegisterLockServer(s *grpc.Server, srv LockServer) { + s.RegisterService(&_Lock_serviceDesc, srv) +} + +func _Lock_Lock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LockRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LockServer).Lock(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/v3lockpb.Lock/Lock", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LockServer).Lock(ctx, req.(*LockRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Lock_Unlock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UnlockRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LockServer).Unlock(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/v3lockpb.Lock/Unlock", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LockServer).Unlock(ctx, req.(*UnlockRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Lock_serviceDesc = grpc.ServiceDesc{ + ServiceName: "v3lockpb.Lock", + HandlerType: (*LockServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Lock", + Handler: _Lock_Lock_Handler, + }, + { + MethodName: "Unlock", + Handler: _Lock_Unlock_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "v3lock.proto", +} + +func (m *LockRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LockRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintV3Lock(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if m.Lease != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintV3Lock(dAtA, i, uint64(m.Lease)) + } + return i, nil +} + +func (m *LockResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LockResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintV3Lock(dAtA, i, uint64(m.Header.Size())) + n1, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if len(m.Key) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintV3Lock(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + return i, nil +} + +func (m *UnlockRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UnlockRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + 
var l int + _ = l + if len(m.Key) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintV3Lock(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + return i, nil +} + +func (m *UnlockResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UnlockResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Header != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintV3Lock(dAtA, i, uint64(m.Header.Size())) + n2, err := m.Header.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + return i, nil +} + +func encodeFixed64V3Lock(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32V3Lock(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintV3Lock(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *LockRequest) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovV3Lock(uint64(l)) + } + if m.Lease != 0 { + n += 1 + sovV3Lock(uint64(m.Lease)) + } + return n +} + +func (m *LockResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovV3Lock(uint64(l)) + } + l = len(m.Key) + if l > 0 { + n += 1 + l + sovV3Lock(uint64(l)) + } + return n +} + +func (m *UnlockRequest) Size() (n int) { + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovV3Lock(uint64(l)) + } + return n +} + +func (m *UnlockResponse) Size() (n int) { + var l int + _ = l + if m.Header != nil { + l = m.Header.Size() + n += 1 + l + sovV3Lock(uint64(l)) + } + return n +} + +func sovV3Lock(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozV3Lock(x uint64) (n int) { + return sovV3Lock(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *LockRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Lock + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LockRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LockRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Lock + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthV3Lock + } + postIndex := iNdEx + 
byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...) + if m.Name == nil { + m.Name = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType) + } + m.Lease = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Lock + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Lease |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipV3Lock(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthV3Lock + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LockResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Lock + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LockResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LockResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Lock + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthV3Lock + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &etcdserverpb.ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Lock + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthV3Lock + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipV3Lock(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthV3Lock + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UnlockRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Lock + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UnlockRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UnlockRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Lock + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthV3Lock + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipV3Lock(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthV3Lock + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UnlockResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Lock + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UnlockResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UnlockResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowV3Lock + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthV3Lock + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Header == nil { + m.Header = &etcdserverpb.ResponseHeader{} + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipV3Lock(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthV3Lock + } + if (iNdEx + 
skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipV3Lock(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowV3Lock + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowV3Lock + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowV3Lock + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthV3Lock + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowV3Lock + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipV3Lock(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthV3Lock = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowV3Lock = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("v3lock.proto", fileDescriptorV3Lock) } + +var fileDescriptorV3Lock = []byte{ + // 336 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x33, 0xce, 0xc9, + 0x4f, 0xce, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x80, 0xf0, 0x0a, 0x92, 0xa4, 0x44, + 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x82, 0xfa, 0x20, 0x16, 0x44, 0x5e, 0x4a, 0x2d, 0xb5, 0x24, 0x39, + 0x45, 0x1f, 0x44, 0x14, 0xa7, 0x16, 0x95, 0xa5, 0x16, 0x21, 0x31, 0x0b, 0x92, 0xf4, 0x8b, 0x0a, + 0x92, 0xa1, 0xea, 0x64, 0xd2, 0xf3, 0xf3, 0xd3, 0x73, 0x52, 0xf5, 0x13, 0x0b, 0x32, 0xf5, 0x13, + 0xf3, 0xf2, 0xf2, 0x4b, 0x12, 0x4b, 0x32, 0xf3, 0xf3, 0x8a, 0x21, 0xb2, 0x4a, 0xe6, 0x5c, 0xdc, + 0x3e, 0xf9, 0xc9, 0xd9, 0x41, 0xa9, 0x85, 0xa5, 0xa9, 0xc5, 0x25, 0x42, 0x42, 0x5c, 0x2c, 0x79, + 0x89, 0xb9, 0xa9, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x3c, 0x41, 0x60, 0xb6, 0x90, 0x08, 0x17, 0x6b, + 0x4e, 0x6a, 0x62, 0x71, 0xaa, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x73, 0x10, 0x84, 0xa3, 0x14, 0xc6, + 0xc5, 0x03, 0xd1, 0x58, 0x5c, 0x90, 0x9f, 0x57, 0x9c, 0x2a, 0x64, 0xc2, 0xc5, 0x96, 0x91, 0x9a, + 0x98, 0x92, 0x5a, 0x04, 0xd6, 0xcb, 0x6d, 0x24, 0xa3, 0x87, 0xec, 0x1e, 0x3d, 0x98, 0x3a, 0x0f, + 0xb0, 0x9a, 0x20, 0xa8, 0x5a, 0x21, 0x01, 0x2e, 0xe6, 0xec, 0xd4, 0x4a, 0xb0, 0xc9, 0x3c, 0x41, + 0x20, 0xa6, 0x92, 0x22, 0x17, 0x6f, 0x68, 0x5e, 0x0e, 0x92, 0x93, 0xa0, 0x4a, 0x18, 0x11, 0x4a, + 0xdc, 0xb8, 0xf8, 0x60, 0x4a, 0x28, 0xb1, 0xdc, 
0x68, 0x17, 0x23, 0x17, 0x0b, 0xc8, 0x0f, 0x42, + 0x21, 0x50, 0x5a, 0x54, 0x0f, 0x16, 0xe6, 0x7a, 0x48, 0x81, 0x22, 0x25, 0x86, 0x2e, 0x0c, 0x31, + 0x4d, 0x49, 0xb6, 0xe9, 0xf2, 0x93, 0xc9, 0x4c, 0xe2, 0x4a, 0x42, 0xfa, 0x65, 0xc6, 0x89, 0x39, + 0x05, 0x19, 0x89, 0xfa, 0x20, 0x55, 0x60, 0xc2, 0x8a, 0x51, 0x4b, 0x28, 0x86, 0x8b, 0x0d, 0xe2, + 0x4c, 0x21, 0x71, 0x84, 0x01, 0x28, 0x7e, 0x93, 0x92, 0xc0, 0x94, 0x80, 0x9a, 0x2d, 0x0f, 0x36, + 0x5b, 0x52, 0x49, 0x04, 0xd5, 0xec, 0xd2, 0x3c, 0xa8, 0xe9, 0x4e, 0x02, 0x27, 0x1e, 0xc9, 0x31, + 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x8c, 0xc7, 0x72, 0x0c, 0x49, 0x6c, 0xe0, + 0x18, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xb6, 0xa0, 0x26, 0x28, 0x47, 0x02, 0x00, 0x00, +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.proto b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.proto new file mode 100644 index 000000000000..3e92a6ec277c --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.proto @@ -0,0 +1,65 @@ +syntax = "proto3"; +package v3lockpb; + +import "gogoproto/gogo.proto"; +import "etcd/etcdserver/etcdserverpb/rpc.proto"; + +// for grpc-gateway +import "google/api/annotations.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.unmarshaler_all) = true; + +// The lock service exposes client-side locking facilities as a gRPC interface. +service Lock { + // Lock acquires a distributed shared lock on a given named lock. + // On success, it will return a unique key that exists so long as the + // lock is held by the caller. This key can be used in conjunction with + // transactions to safely ensure updates to etcd only occur while holding + // lock ownership. The lock is held until Unlock is called on the key or the + // lease associated with the owner expires. + rpc Lock(LockRequest) returns (LockResponse) { + option (google.api.http) = { + post: "/v3alpha/lock/lock" + body: "*" + }; + } + + // Unlock takes a key returned by Lock and releases the hold on the lock. The + // next Lock caller waiting for the lock will then be woken up and given + // ownership of the lock. + rpc Unlock(UnlockRequest) returns (UnlockResponse) { + option (google.api.http) = { + post: "/v3alpha/lock/unlock" + body: "*" + }; + } +} + +message LockRequest { + // name is the identifier for the distributed shared lock to be acquired. + bytes name = 1; + // lease is the ID of the lease that will be attached to ownership of the + // lock. If the lease expires or is revoked while it holds the lock, + // the lock is automatically released. Calls to Lock with the same lease will + // be treated as a single acquisition; locking twice with the same lease is a + // no-op. + int64 lease = 2; +} + +message LockResponse { + etcdserverpb.ResponseHeader header = 1; + // key is a key that will exist on etcd for the duration that the Lock caller + // owns the lock. Users should not modify this key or the lock may exhibit + // undefined behavior. + bytes key = 2; +} + +message UnlockRequest { + // key is the lock ownership key granted by Lock.
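
Taken together, the two RPCs behave like a lease-scoped mutex: Lock returns an ownership key bound to the caller's lease, and Unlock (or lease expiry) releases it and wakes the next waiter. A hedged client-side sketch against the generated stubs — the endpoint, lock name, and TTL are illustrative, and it assumes the lock service is served on the client port, as in etcd's embedded server:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"}, // illustrative endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx := context.Background()

	// Attach lock ownership to a lease so the lock auto-releases if the
	// holder dies and the lease expires.
	lease, err := cli.Grant(ctx, 30)
	if err != nil {
		log.Fatal(err)
	}

	lc := v3lockpb.NewLockClient(cli.ActiveConnection())
	lresp, err := lc.Lock(ctx, &v3lockpb.LockRequest{Name: []byte("my-lock"), Lease: int64(lease.ID)})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("holding lock, ownership key: %q\n", lresp.Key)

	// Releasing the hold wakes the next waiting Lock caller.
	if _, err := lc.Unlock(ctx, &v3lockpb.UnlockRequest{Key: lresp.Key}); err != nil {
		log.Fatal(err)
	}
}
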
+ bytes key = 1; +} + +message UnlockResponse { + etcdserverpb.ResponseHeader header = 1; +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/grpc.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/grpc.go index 88174e3bac24..5333491a2e25 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/grpc.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/grpc.go @@ -16,6 +16,10 @@ package v3rpc import ( "crypto/tls" + "io/ioutil" + "math" + "os" + "sync" "github.com/coreos/etcd/etcdserver" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" @@ -24,11 +28,16 @@ import ( "google.golang.org/grpc/grpclog" ) -func init() { - grpclog.SetLogger(plog) -} +const ( + grpcOverheadBytes = 512 * 1024 + maxStreams = math.MaxUint32 + maxSendBytes = math.MaxInt32 +) + +// integration tests call this multiple times, which is racy on the gRPC side +var grpclogOnce sync.Once -func Server(s *etcdserver.EtcdServer, tls *tls.Config) *grpc.Server { +func Server(s *etcdserver.EtcdServer, tls *tls.Config, gopts ...grpc.ServerOption) *grpc.Server { var opts []grpc.ServerOption opts = append(opts, grpc.CustomCodec(&codec{})) if tls != nil { @@ -36,8 +45,11 @@ func Server(s *etcdserver.EtcdServer, tls *tls.Config) *grpc.Server { } opts = append(opts, grpc.UnaryInterceptor(newUnaryInterceptor(s))) opts = append(opts, grpc.StreamInterceptor(newStreamInterceptor(s))) + opts = append(opts, grpc.MaxRecvMsgSize(int(s.Cfg.MaxRequestBytes+grpcOverheadBytes))) + opts = append(opts, grpc.MaxSendMsgSize(maxSendBytes)) + opts = append(opts, grpc.MaxConcurrentStreams(maxStreams)) + grpcServer := grpc.NewServer(append(opts, gopts...)...) - grpcServer := grpc.NewServer(opts...) pb.RegisterKVServer(grpcServer, NewQuotaKVServer(s)) pb.RegisterWatchServer(grpcServer, NewWatchServer(s)) pb.RegisterLeaseServer(grpcServer, NewQuotaLeaseServer(s)) @@ -45,5 +57,15 @@ pb.RegisterAuthServer(grpcServer, NewAuthServer(s)) pb.RegisterMaintenanceServer(grpcServer, NewMaintenanceServer(s)) + grpclogOnce.Do(func() { + if s.Cfg.Debug { + grpc.EnableTracing = true + // enable info, warning, error + grpclog.SetLoggerV2(grpclog.NewLoggerV2(os.Stderr, os.Stderr, os.Stderr)) + } else { + // only discard info + grpclog.SetLoggerV2(grpclog.NewLoggerV2(ioutil.Discard, os.Stderr, os.Stderr)) + } + }) return grpcServer } diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/interceptor.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/interceptor.go index 29aef2914a5b..de9470a8905c 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/interceptor.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/interceptor.go @@ -45,7 +45,7 @@ func newUnaryInterceptor(s *etcdserver.EtcdServer) grpc.UnaryServerInterceptor { return nil, rpctypes.ErrGRPCNotCapable } - md, ok := metadata.FromContext(ctx) + md, ok := metadata.FromIncomingContext(ctx) if ok { if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader { if s.Leader() == types.ID(raft.None) { @@ -66,7 +66,7 @@ func newStreamInterceptor(s *etcdserver.EtcdServer) grpc.StreamServerInterceptor return rpctypes.ErrGRPCNotCapable } - md, ok := metadata.FromContext(ss.Context()) + md, ok := metadata.FromIncomingContext(ss.Context()) if ok { if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader { if s.Leader() == types.ID(raft.None) { diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/key.go 
b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/key.go index 6ea7bbacde0a..d0220e03a26e 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/key.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/key.go @@ -134,6 +134,12 @@ func checkPutRequest(r *pb.PutRequest) error { if len(r.Key) == 0 { return rpctypes.ErrGRPCEmptyKey } + if r.IgnoreValue && len(r.Value) != 0 { + return rpctypes.ErrGRPCValueProvided + } + if r.IgnoreLease && r.Lease != 0 { + return rpctypes.ErrGRPCLeaseProvided + } return nil } @@ -246,8 +252,8 @@ func checkRequestOp(u *pb.RequestOp) error { return checkDeleteRequest(uv.RequestDeleteRange) } default: - // empty op - return nil + // empty op / nil entry + return rpctypes.ErrGRPCKeyNotFound } return nil } diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/lease.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/lease.go index be6e20b97fb6..91618d115fcb 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/lease.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/lease.go @@ -18,6 +18,7 @@ import ( "io" "github.com/coreos/etcd/etcdserver" + "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" "github.com/coreos/etcd/lease" "golang.org/x/net/context" @@ -53,20 +54,45 @@ func (ls *LeaseServer) LeaseRevoke(ctx context.Context, rr *pb.LeaseRevokeReques func (ls *LeaseServer) LeaseTimeToLive(ctx context.Context, rr *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) { resp, err := ls.le.LeaseTimeToLive(ctx, rr) - if err != nil { + if err != nil && err != lease.ErrLeaseNotFound { return nil, togRPCError(err) } + if err == lease.ErrLeaseNotFound { + resp = &pb.LeaseTimeToLiveResponse{ + Header: &pb.ResponseHeader{}, + ID: rr.ID, + TTL: -1, + } + } ls.hdr.fill(resp.Header) return resp, nil } -func (ls *LeaseServer) LeaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) error { +func (ls *LeaseServer) LeaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) (err error) { + errc := make(chan error, 1) + go func() { + errc <- ls.leaseKeepAlive(stream) + }() + select { + case err = <-errc: + case <-stream.Context().Done(): + // the only server-side cancellation is noleader for now. 
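
The LeaseKeepAlive wrapper in this hunk uses a reusable shape: run the blocking receive loop in a goroutine, then select on its error channel against the stream context, so a bare cancellation can be translated into a meaningful domain error. A self-contained sketch of the same pattern under illustrative names (errNoLeader stands in for rpctypes.ErrGRPCNoLeader; this is not etcd's code):

package main

import (
	"errors"
	"fmt"
	"time"

	"golang.org/x/net/context"
)

// errNoLeader stands in for the server's "no leader" gRPC error.
var errNoLeader = errors.New("no leader")

// runWithCancel races a blocking worker against ctx; on cancellation it
// maps context.Canceled onto a domain-specific error.
func runWithCancel(ctx context.Context, work func() error) error {
	errc := make(chan error, 1) // buffered so the goroutine never leaks on send
	go func() { errc <- work() }()
	select {
	case err := <-errc:
		return err
	case <-ctx.Done():
		if ctx.Err() == context.Canceled {
			return errNoLeader
		}
		return ctx.Err()
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		time.Sleep(10 * time.Millisecond)
		cancel() // simulate the server cancelling the stream
	}()
	err := runWithCancel(ctx, func() error {
		select {} // a receive loop that never returns on its own
	})
	fmt.Println(err) // prints "no leader"
}
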
+ err = stream.Context().Err() + if err == context.Canceled { + err = rpctypes.ErrGRPCNoLeader + } + } + return err +} + +func (ls *LeaseServer) leaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) error { for { req, err := stream.Recv() if err == io.EOF { return nil } if err != nil { + plog.Debugf("failed to receive lease keepalive request from gRPC stream (%q)", err.Error()) return err } @@ -92,6 +118,7 @@ func (ls *LeaseServer) LeaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) erro resp.TTL = ttl err = stream.Send(resp) if err != nil { + plog.Debugf("failed to send lease keepalive response to gRPC stream (%q)", err.Error()) return err } } diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/maintenance.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/maintenance.go index af29ab3b71e7..3657d0360829 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/maintenance.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/maintenance.go @@ -47,6 +47,7 @@ type RaftStatusGetter interface { } type AuthGetter interface { + AuthInfoFromCtx(ctx context.Context) (*auth.AuthInfo, error) AuthStore() auth.AuthStore } @@ -152,7 +153,7 @@ type authMaintenanceServer struct { } func (ams *authMaintenanceServer) isAuthenticated(ctx context.Context) error { - authInfo, err := ams.ag.AuthStore().AuthInfoFromCtx(ctx) + authInfo, err := ams.ag.AuthInfoFromCtx(ctx) if err != nil { return err } diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/member.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/member.go index bcd5dac5183f..91a59389b873 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/member.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/member.go @@ -48,21 +48,24 @@ func (cs *ClusterServer) MemberAdd(ctx context.Context, r *pb.MemberAddRequest) now := time.Now() m := membership.NewMember("", urls, "", &now) - if err = cs.server.AddMember(ctx, *m); err != nil { - return nil, togRPCError(err) + membs, merr := cs.server.AddMember(ctx, *m) + if merr != nil { + return nil, togRPCError(merr) } return &pb.MemberAddResponse{ - Header: cs.header(), - Member: &pb.Member{ID: uint64(m.ID), PeerURLs: m.PeerURLs}, + Header: cs.header(), + Member: &pb.Member{ID: uint64(m.ID), PeerURLs: m.PeerURLs}, + Members: membersToProtoMembers(membs), }, nil } func (cs *ClusterServer) MemberRemove(ctx context.Context, r *pb.MemberRemoveRequest) (*pb.MemberRemoveResponse, error) { - if err := cs.server.RemoveMember(ctx, r.ID); err != nil { + membs, err := cs.server.RemoveMember(ctx, r.ID) + if err != nil { return nil, togRPCError(err) } - return &pb.MemberRemoveResponse{Header: cs.header()}, nil + return &pb.MemberRemoveResponse{Header: cs.header(), Members: membersToProtoMembers(membs)}, nil } func (cs *ClusterServer) MemberUpdate(ctx context.Context, r *pb.MemberUpdateRequest) (*pb.MemberUpdateResponse, error) { @@ -70,15 +73,23 @@ func (cs *ClusterServer) MemberUpdate(ctx context.Context, r *pb.MemberUpdateReq ID: types.ID(r.ID), RaftAttributes: membership.RaftAttributes{PeerURLs: r.PeerURLs}, } - if err := cs.server.UpdateMember(ctx, m); err != nil { + membs, err := cs.server.UpdateMember(ctx, m) + if err != nil { return nil, togRPCError(err) } - return &pb.MemberUpdateResponse{Header: cs.header()}, nil + return &pb.MemberUpdateResponse{Header: cs.header(), Members: membersToProtoMembers(membs)}, nil } func (cs *ClusterServer) MemberList(ctx context.Context, r *pb.MemberListRequest) (*pb.MemberListResponse, error) { - membs := cs.cluster.Members() + 
membs := membersToProtoMembers(cs.cluster.Members()) + return &pb.MemberListResponse{Header: cs.header(), Members: membs}, nil +} +func (cs *ClusterServer) header() *pb.ResponseHeader { + return &pb.ResponseHeader{ClusterId: uint64(cs.cluster.ID()), MemberId: uint64(cs.server.ID()), RaftTerm: cs.raftTimer.Term()} +} + +func membersToProtoMembers(membs []*membership.Member) []*pb.Member { protoMembs := make([]*pb.Member, len(membs)) for i := range membs { protoMembs[i] = &pb.Member{ @@ -88,10 +99,5 @@ func (cs *ClusterServer) MemberList(ctx context.Context, r *pb.MemberListRequest ClientURLs: membs[i].ClientURLs, } } - - return &pb.MemberListResponse{Header: cs.header(), Members: protoMembs}, nil -} - -func (cs *ClusterServer) header() *pb.ResponseHeader { - return &pb.ResponseHeader{ClusterId: uint64(cs.cluster.ID()), MemberId: uint64(cs.server.ID()), RaftTerm: cs.raftTimer.Term()} + return protoMembs } diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go index 5a3cfc0a0dbf..bd17179e9977 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go @@ -17,16 +17,20 @@ package rpctypes import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" ) var ( // server-side error - ErrGRPCEmptyKey = grpc.Errorf(codes.InvalidArgument, "etcdserver: key is not provided") - ErrGRPCTooManyOps = grpc.Errorf(codes.InvalidArgument, "etcdserver: too many operations in txn request") - ErrGRPCDuplicateKey = grpc.Errorf(codes.InvalidArgument, "etcdserver: duplicate key given in txn request") - ErrGRPCCompacted = grpc.Errorf(codes.OutOfRange, "etcdserver: mvcc: required revision has been compacted") - ErrGRPCFutureRev = grpc.Errorf(codes.OutOfRange, "etcdserver: mvcc: required revision is a future revision") - ErrGRPCNoSpace = grpc.Errorf(codes.ResourceExhausted, "etcdserver: mvcc: database space exceeded") + ErrGRPCEmptyKey = grpc.Errorf(codes.InvalidArgument, "etcdserver: key is not provided") + ErrGRPCKeyNotFound = grpc.Errorf(codes.InvalidArgument, "etcdserver: key not found") + ErrGRPCValueProvided = grpc.Errorf(codes.InvalidArgument, "etcdserver: value is provided") + ErrGRPCLeaseProvided = grpc.Errorf(codes.InvalidArgument, "etcdserver: lease is provided") + ErrGRPCTooManyOps = grpc.Errorf(codes.InvalidArgument, "etcdserver: too many operations in txn request") + ErrGRPCDuplicateKey = grpc.Errorf(codes.InvalidArgument, "etcdserver: duplicate key given in txn request") + ErrGRPCCompacted = grpc.Errorf(codes.OutOfRange, "etcdserver: mvcc: required revision has been compacted") + ErrGRPCFutureRev = grpc.Errorf(codes.OutOfRange, "etcdserver: mvcc: required revision is a future revision") + ErrGRPCNoSpace = grpc.Errorf(codes.ResourceExhausted, "etcdserver: mvcc: database space exceeded") ErrGRPCLeaseNotFound = grpc.Errorf(codes.NotFound, "etcdserver: requested lease not found") ErrGRPCLeaseExist = grpc.Errorf(codes.FailedPrecondition, "etcdserver: lease already exists") @@ -53,6 +57,7 @@ var ( ErrGRPCPermissionNotGranted = grpc.Errorf(codes.FailedPrecondition, "etcdserver: permission is not granted to the role") ErrGRPCAuthNotEnabled = grpc.Errorf(codes.FailedPrecondition, "etcdserver: authentication is not enabled") ErrGRPCInvalidAuthToken = grpc.Errorf(codes.Unauthenticated, "etcdserver: invalid auth token") + ErrGRPCInvalidAuthMgmt = grpc.Errorf(codes.InvalidArgument, 
"etcdserver: invalid auth management") ErrGRPCNoLeader = grpc.Errorf(codes.Unavailable, "etcdserver: no leader") ErrGRPCNotCapable = grpc.Errorf(codes.Unavailable, "etcdserver: not capable") @@ -63,7 +68,11 @@ var ( ErrGRPCUnhealthy = grpc.Errorf(codes.Unavailable, "etcdserver: unhealthy cluster") errStringToError = map[string]error{ - grpc.ErrorDesc(ErrGRPCEmptyKey): ErrGRPCEmptyKey, + grpc.ErrorDesc(ErrGRPCEmptyKey): ErrGRPCEmptyKey, + grpc.ErrorDesc(ErrGRPCKeyNotFound): ErrGRPCKeyNotFound, + grpc.ErrorDesc(ErrGRPCValueProvided): ErrGRPCValueProvided, + grpc.ErrorDesc(ErrGRPCLeaseProvided): ErrGRPCLeaseProvided, + grpc.ErrorDesc(ErrGRPCTooManyOps): ErrGRPCTooManyOps, grpc.ErrorDesc(ErrGRPCDuplicateKey): ErrGRPCDuplicateKey, grpc.ErrorDesc(ErrGRPCCompacted): ErrGRPCCompacted, @@ -95,6 +104,7 @@ var ( grpc.ErrorDesc(ErrGRPCPermissionNotGranted): ErrGRPCPermissionNotGranted, grpc.ErrorDesc(ErrGRPCAuthNotEnabled): ErrGRPCAuthNotEnabled, grpc.ErrorDesc(ErrGRPCInvalidAuthToken): ErrGRPCInvalidAuthToken, + grpc.ErrorDesc(ErrGRPCInvalidAuthMgmt): ErrGRPCInvalidAuthMgmt, grpc.ErrorDesc(ErrGRPCNoLeader): ErrGRPCNoLeader, grpc.ErrorDesc(ErrGRPCNotCapable): ErrGRPCNotCapable, @@ -106,12 +116,15 @@ var ( } // client-side error - ErrEmptyKey = Error(ErrGRPCEmptyKey) - ErrTooManyOps = Error(ErrGRPCTooManyOps) - ErrDuplicateKey = Error(ErrGRPCDuplicateKey) - ErrCompacted = Error(ErrGRPCCompacted) - ErrFutureRev = Error(ErrGRPCFutureRev) - ErrNoSpace = Error(ErrGRPCNoSpace) + ErrEmptyKey = Error(ErrGRPCEmptyKey) + ErrKeyNotFound = Error(ErrGRPCKeyNotFound) + ErrValueProvided = Error(ErrGRPCValueProvided) + ErrLeaseProvided = Error(ErrGRPCLeaseProvided) + ErrTooManyOps = Error(ErrGRPCTooManyOps) + ErrDuplicateKey = Error(ErrGRPCDuplicateKey) + ErrCompacted = Error(ErrGRPCCompacted) + ErrFutureRev = Error(ErrGRPCFutureRev) + ErrNoSpace = Error(ErrGRPCNoSpace) ErrLeaseNotFound = Error(ErrGRPCLeaseNotFound) ErrLeaseExist = Error(ErrGRPCLeaseExist) @@ -138,6 +151,7 @@ var ( ErrPermissionNotGranted = Error(ErrGRPCPermissionNotGranted) ErrAuthNotEnabled = Error(ErrGRPCAuthNotEnabled) ErrInvalidAuthToken = Error(ErrGRPCInvalidAuthToken) + ErrInvalidAuthMgmt = Error(ErrGRPCInvalidAuthMgmt) ErrNoLeader = Error(ErrGRPCNoLeader) ErrNotCapable = Error(ErrGRPCNotCapable) @@ -175,3 +189,10 @@ func Error(err error) error { } return EtcdError{code: grpc.Code(verr), desc: grpc.ErrorDesc(verr)} } + +func ErrorDesc(err error) string { + if s, ok := status.FromError(err); ok { + return s.Message() + } + return err.Error() +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go index 5a057ed040da..8d38d9bd18fe 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go @@ -42,8 +42,6 @@ func togRPCError(err error) error { return rpctypes.ErrGRPCCompacted case mvcc.ErrFutureRev: return rpctypes.ErrGRPCFutureRev - case lease.ErrLeaseNotFound: - return rpctypes.ErrGRPCLeaseNotFound case etcdserver.ErrRequestTooLarge: return rpctypes.ErrGRPCRequestTooLarge case etcdserver.ErrNoSpace: @@ -63,6 +61,8 @@ func togRPCError(err error) error { return rpctypes.ErrGRPCTimeoutDueToConnectionLost case etcdserver.ErrUnhealthy: return rpctypes.ErrGRPCUnhealthy + case etcdserver.ErrKeyNotFound: + return rpctypes.ErrGRPCKeyNotFound case lease.ErrLeaseNotFound: return rpctypes.ErrGRPCLeaseNotFound @@ -95,6 +95,8 @@ func togRPCError(err error) error { return rpctypes.ErrGRPCAuthNotEnabled 
case auth.ErrInvalidAuthToken: return rpctypes.ErrGRPCInvalidAuthToken + case auth.ErrInvalidAuthMgmt: + return rpctypes.ErrGRPCInvalidAuthMgmt default: return grpc.Errorf(codes.Unknown, err.Error()) } diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go index f0215531dee1..cd2adf984538 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go +++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go @@ -21,6 +21,7 @@ import ( "golang.org/x/net/context" + "github.com/coreos/etcd/auth" "github.com/coreos/etcd/etcdserver" "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" @@ -33,6 +34,8 @@ type watchServer struct { memberID int64 raftTimer etcdserver.RaftTimer watchable mvcc.WatchableKV + + ag AuthGetter } func NewWatchServer(s *etcdserver.EtcdServer) pb.WatchServer { @@ -41,6 +44,7 @@ func NewWatchServer(s *etcdserver.EtcdServer) pb.WatchServer { memberID: int64(s.ID()), raftTimer: s, watchable: s.Watchable(), + ag: s, } } @@ -101,6 +105,8 @@ type serverWatchStream struct { // wg waits for the send loop to complete wg sync.WaitGroup + + ag AuthGetter } func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) { @@ -118,6 +124,8 @@ func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) { progress: make(map[mvcc.WatchID]bool), prevKV: make(map[mvcc.WatchID]bool), closec: make(chan struct{}), + + ag: ws.ag, } sws.wg.Add(1) @@ -133,6 +141,7 @@ func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) { // deadlock when calling sws.close(). go func() { if rerr := sws.recvLoop(); rerr != nil { + plog.Debugf("failed to receive watch request from gRPC stream (%q)", rerr.Error()) errc <- rerr } }() @@ -150,6 +159,19 @@ func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) { return err } +func (sws *serverWatchStream) isWatchPermitted(wcr *pb.WatchCreateRequest) bool { + authInfo, err := sws.ag.AuthInfoFromCtx(sws.gRPCStream.Context()) + if err != nil { + return false + } + if authInfo == nil { + // if auth is enabled, IsRangePermitted() can cause an error + authInfo = &auth.AuthInfo{} + } + + return sws.ag.AuthStore().IsRangePermitted(authInfo, wcr.Key, wcr.RangeEnd) == nil +} + func (sws *serverWatchStream) recvLoop() error { for { req, err := sws.gRPCStream.Recv() @@ -171,10 +193,32 @@ func (sws *serverWatchStream) recvLoop() error { // \x00 is the smallest key creq.Key = []byte{0} } + if len(creq.RangeEnd) == 0 { + // force nil since watchstream.Watch distinguishes + // between nil and []byte{} for single key / >= + creq.RangeEnd = nil + } if len(creq.RangeEnd) == 1 && creq.RangeEnd[0] == 0 { // support >= key queries creq.RangeEnd = []byte{} } + + if !sws.isWatchPermitted(creq) { + wr := &pb.WatchResponse{ + Header: sws.newResponseHeader(sws.watchStream.Rev()), + WatchId: -1, + Canceled: true, + Created: true, + CancelReason: rpctypes.ErrGRPCPermissionDenied.Error(), + } + + select { + case sws.ctrlStream <- wr: + case <-sws.closec: + } + return nil + } + filters := FiltersFromRequest(creq) wsrev := sws.watchStream.Rev() @@ -294,6 +338,7 @@ func (sws *serverWatchStream) sendLoop() { mvcc.ReportEventReceived(len(evs)) if err := sws.gRPCStream.Send(wr); err != nil { + plog.Debugf("failed to send watch response to gRPC stream (%q)", err.Error()) return } @@ -310,6 +355,7 @@ func (sws *serverWatchStream) sendLoop() { } if err := sws.gRPCStream.Send(c); err != nil { + plog.Debugf("failed 
to send watch control response to gRPC stream (%q)", err.Error()) return } @@ -325,6 +371,7 @@ func (sws *serverWatchStream) sendLoop() { for _, v := range pending[wid] { mvcc.ReportEventReceived(len(v.Events)) if err := sws.gRPCStream.Send(v); err != nil { + plog.Debugf("failed to send pending watch response to gRPC stream (%q)", err.Error()) return } } diff --git a/vendor/github.com/coreos/etcd/etcdserver/apply.go b/vendor/github.com/coreos/etcd/etcdserver/apply.go index e4bf35bc47e4..0be93c52b6f4 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/apply.go +++ b/vendor/github.com/coreos/etcd/etcdserver/apply.go @@ -16,7 +16,6 @@ package etcdserver import ( "bytes" - "fmt" "sort" "time" @@ -30,11 +29,6 @@ import ( ) const ( - // noTxn is an invalid txn ID. - // To apply with independent Range, Put, Delete, you can pass noTxn - // to apply functions instead of a valid txn ID. - noTxn = -1 - warnApplyDuration = 100 * time.Millisecond ) @@ -51,9 +45,9 @@ type applyResult struct { type applierV3 interface { Apply(r *pb.InternalRaftRequest) *applyResult - Put(txnID int64, p *pb.PutRequest) (*pb.PutResponse, error) - Range(txnID int64, r *pb.RangeRequest) (*pb.RangeResponse, error) - DeleteRange(txnID int64, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) + Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error) + Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) + DeleteRange(txn mvcc.TxnWrite, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, error) @@ -99,11 +93,11 @@ func (a *applierV3backend) Apply(r *pb.InternalRaftRequest) *applyResult { // call into a.s.applyV3.F instead of a.F so upper appliers can check individual calls switch { case r.Range != nil: - ar.resp, ar.err = a.s.applyV3.Range(noTxn, r.Range) + ar.resp, ar.err = a.s.applyV3.Range(nil, r.Range) case r.Put != nil: - ar.resp, ar.err = a.s.applyV3.Put(noTxn, r.Put) + ar.resp, ar.err = a.s.applyV3.Put(nil, r.Put) case r.DeleteRange != nil: - ar.resp, ar.err = a.s.applyV3.DeleteRange(noTxn, r.DeleteRange) + ar.resp, ar.err = a.s.applyV3.DeleteRange(nil, r.DeleteRange) case r.Txn != nil: ar.resp, ar.err = a.s.applyV3.Txn(r.Txn) case r.Compaction != nil: @@ -152,106 +146,87 @@ func (a *applierV3backend) Apply(r *pb.InternalRaftRequest) *applyResult { return ar } -func (a *applierV3backend) Put(txnID int64, p *pb.PutRequest) (*pb.PutResponse, error) { - resp := &pb.PutResponse{} +func (a *applierV3backend) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (resp *pb.PutResponse, err error) { + resp = &pb.PutResponse{} resp.Header = &pb.ResponseHeader{} - var ( - rev int64 - err error - ) - var rr *mvcc.RangeResult - if p.PrevKv { - if txnID != noTxn { - rr, err = a.s.KV().TxnRange(txnID, p.Key, nil, mvcc.RangeOptions{}) - if err != nil { - return nil, err - } - } else { - rr, err = a.s.KV().Range(p.Key, nil, mvcc.RangeOptions{}) - if err != nil { - return nil, err + val, leaseID := p.Value, lease.LeaseID(p.Lease) + if txn == nil { + if leaseID != lease.NoLease { + if l := a.s.lessor.Lookup(leaseID); l == nil { + return nil, lease.ErrLeaseNotFound } } + txn = a.s.KV().Write() + defer txn.End() } - if txnID != noTxn { - rev, err = a.s.KV().TxnPut(txnID, p.Key, p.Value, lease.LeaseID(p.Lease)) + var rr *mvcc.RangeResult + if p.IgnoreValue || p.IgnoreLease || p.PrevKv { + rr, err = txn.Range(p.Key, nil, mvcc.RangeOptions{}) if err != nil { return nil, err } - 
} else { - leaseID := lease.LeaseID(p.Lease) - if leaseID != lease.NoLease { - if l := a.s.lessor.Lookup(leaseID); l == nil { - return nil, lease.ErrLeaseNotFound - } + } + if p.IgnoreValue || p.IgnoreLease { + if rr == nil || len(rr.KVs) == 0 { + // ignore_{lease,value} flag expects previous key-value pair + return nil, ErrKeyNotFound } - rev = a.s.KV().Put(p.Key, p.Value, leaseID) } - resp.Header.Revision = rev - if rr != nil && len(rr.KVs) != 0 { - resp.PrevKv = &rr.KVs[0] + if p.IgnoreValue { + val = rr.KVs[0].Value + } + if p.IgnoreLease { + leaseID = lease.LeaseID(rr.KVs[0].Lease) + } + if p.PrevKv { + if rr != nil && len(rr.KVs) != 0 { + resp.PrevKv = &rr.KVs[0] + } } + + resp.Header.Revision = txn.Put(p.Key, val, leaseID) return resp, nil } -func (a *applierV3backend) DeleteRange(txnID int64, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { +func (a *applierV3backend) DeleteRange(txn mvcc.TxnWrite, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { resp := &pb.DeleteRangeResponse{} resp.Header = &pb.ResponseHeader{} - var ( - n int64 - rev int64 - err error - ) + if txn == nil { + txn = a.s.kv.Write() + defer txn.End() + } if isGteRange(dr.RangeEnd) { dr.RangeEnd = []byte{} } - var rr *mvcc.RangeResult if dr.PrevKv { - if txnID != noTxn { - rr, err = a.s.KV().TxnRange(txnID, dr.Key, dr.RangeEnd, mvcc.RangeOptions{}) - if err != nil { - return nil, err - } - } else { - rr, err = a.s.KV().Range(dr.Key, dr.RangeEnd, mvcc.RangeOptions{}) - if err != nil { - return nil, err - } - } - } - - if txnID != noTxn { - n, rev, err = a.s.KV().TxnDeleteRange(txnID, dr.Key, dr.RangeEnd) + rr, err := txn.Range(dr.Key, dr.RangeEnd, mvcc.RangeOptions{}) if err != nil { return nil, err } - } else { - n, rev = a.s.KV().DeleteRange(dr.Key, dr.RangeEnd) - } - - resp.Deleted = n - if rr != nil { - for i := range rr.KVs { - resp.PrevKvs = append(resp.PrevKvs, &rr.KVs[i]) + if rr != nil { + for i := range rr.KVs { + resp.PrevKvs = append(resp.PrevKvs, &rr.KVs[i]) + } } } - resp.Header.Revision = rev + + resp.Deleted, resp.Header.Revision = txn.DeleteRange(dr.Key, dr.RangeEnd) return resp, nil } -func (a *applierV3backend) Range(txnID int64, r *pb.RangeRequest) (*pb.RangeResponse, error) { +func (a *applierV3backend) Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) { resp := &pb.RangeResponse{} resp.Header = &pb.ResponseHeader{} - var ( - rr *mvcc.RangeResult - err error - ) + if txn == nil { + txn = a.s.kv.Read() + defer txn.End() + } if isGteRange(r.RangeEnd) { r.RangeEnd = []byte{} @@ -275,16 +250,9 @@ func (a *applierV3backend) Range(txnID int64, r *pb.RangeRequest) (*pb.RangeResp Count: r.CountOnly, } - if txnID != noTxn { - rr, err = a.s.KV().TxnRange(txnID, r.Key, r.RangeEnd, ro) - if err != nil { - return nil, err - } - } else { - rr, err = a.s.KV().Range(r.Key, r.RangeEnd, ro) - if err != nil { - return nil, err - } + rr, err := txn.Range(r.Key, r.RangeEnd, ro) + if err != nil { + return nil, err } if r.MaxModRevision != 0 { @@ -350,61 +318,64 @@ func (a *applierV3backend) Range(txnID int64, r *pb.RangeRequest) (*pb.RangeResp } func (a *applierV3backend) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) { - ok := true - for _, c := range rt.Compare { - if _, ok = a.applyCompare(c); !ok { - break - } - } + isWrite := !isTxnReadonly(rt) + txn := mvcc.NewReadOnlyTxnWrite(a.s.KV().Read()) - var reqs []*pb.RequestOp - if ok { - reqs = rt.Success - } else { - reqs = rt.Failure - } - - if err := a.checkRequestLeases(reqs); err != nil { - return nil, err + reqs, 
ok := a.compareToOps(txn, rt) + if isWrite { + if err := a.checkRequestPut(txn, reqs); err != nil { + txn.End() + return nil, err + } } - if err := a.checkRequestRange(reqs); err != nil { + if err := checkRequestRange(txn, reqs); err != nil { + txn.End() return nil, err } - // When executing the operations of txn, we need to hold the txn lock. - // So the reader will not see any intermediate results. - txnID := a.s.KV().TxnBegin() - resps := make([]*pb.ResponseOp, len(reqs)) - for i := range reqs { - resps[i] = a.applyUnion(txnID, reqs[i]) + txnResp := &pb.TxnResponse{ + Responses: resps, + Succeeded: ok, + Header: &pb.ResponseHeader{}, } - err := a.s.KV().TxnEnd(txnID) - if err != nil { - panic(fmt.Sprint("unexpected error when closing txn", txnID)) + // When executing mutable txn ops, etcd must hold the txn lock so + // readers do not see any intermediate results. Since writes are + // serialized on the raft loop, the revision in the read view will + // be the revision of the write txn. + if isWrite { + txn.End() + txn = a.s.KV().Write() } + for i := range reqs { + resps[i] = a.applyUnion(txn, reqs[i]) + } + rev := txn.Rev() + if len(txn.Changes()) != 0 { + rev++ + } + txn.End() - txnResp := &pb.TxnResponse{} - txnResp.Header = &pb.ResponseHeader{} - txnResp.Header.Revision = a.s.KV().Rev() - txnResp.Responses = resps - txnResp.Succeeded = ok + txnResp.Header.Revision = rev return txnResp, nil } -// applyCompare applies the compare request. -// It returns the revision at which the comparison happens. If the comparison -// succeeds, the it returns true. Otherwise it returns false. -func (a *applierV3backend) applyCompare(c *pb.Compare) (int64, bool) { - rr, err := a.s.KV().Range(c.Key, nil, mvcc.RangeOptions{}) - rev := rr.Rev +func (a *applierV3backend) compareToOps(rv mvcc.ReadView, rt *pb.TxnRequest) ([]*pb.RequestOp, bool) { + for _, c := range rt.Compare { + if !applyCompare(rv, c) { + return rt.Failure, false + } + } + return rt.Success, true +} +// applyCompare applies the compare request. +// If the comparison succeeds, it returns true. Otherwise, returns false. 
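
Client-side, the compare-then-branch semantics just described surface through clientv3's transaction builder: if every compare holds, the Then ops run; otherwise the Else ops run, and Succeeded reports which branch was taken. A hedged sketch (endpoint and key names are illustrative):

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
	"golang.org/x/net/context"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"}, // illustrative
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// The compare runs against a consistent read view; only one branch's
	// ops are applied, mirroring the server's compareToOps.
	resp, err := cli.Txn(context.Background()).
		If(clientv3.Compare(clientv3.Value("job-owner"), "=", "worker-1")).
		Then(clientv3.OpPut("job-state", "running")).
		Else(clientv3.OpGet("job-owner")).
		Commit()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("compare succeeded:", resp.Succeeded)
}
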
+func applyCompare(rv mvcc.ReadView, c *pb.Compare) bool { + rr, err := rv.Range(c.Key, nil, mvcc.RangeOptions{}) if err != nil { - if err == mvcc.ErrTxnIDMismatch { - panic("unexpected txn ID mismatch error") - } - return rev, false + return false } var ckv mvccpb.KeyValue if len(rr.KVs) != 0 { @@ -416,7 +387,7 @@ func (a *applierV3backend) applyCompare(c *pb.Compare) (int64, bool) { // We can treat non-existence as the empty set explicitly, such that // even a key with a value of length 0 bytes is still a real key // that was written that way - return rev, false + return false } } @@ -448,30 +419,22 @@ func (a *applierV3backend) applyCompare(c *pb.Compare) (int64, bool) { switch c.Result { case pb.Compare_EQUAL: - if result != 0 { - return rev, false - } + return result == 0 case pb.Compare_NOT_EQUAL: - if result == 0 { - return rev, false - } + return result != 0 case pb.Compare_GREATER: - if result != 1 { - return rev, false - } + return result > 0 case pb.Compare_LESS: - if result != -1 { - return rev, false - } + return result < 0 } - return rev, true + return true } -func (a *applierV3backend) applyUnion(txnID int64, union *pb.RequestOp) *pb.ResponseOp { +func (a *applierV3backend) applyUnion(txn mvcc.TxnWrite, union *pb.RequestOp) *pb.ResponseOp { switch tv := union.Request.(type) { case *pb.RequestOp_RequestRange: if tv.RequestRange != nil { - resp, err := a.Range(txnID, tv.RequestRange) + resp, err := a.Range(txn, tv.RequestRange) if err != nil { plog.Panicf("unexpected error during txn: %v", err) } @@ -479,7 +442,7 @@ func (a *applierV3backend) applyUnion(txnID int64, union *pb.RequestOp) *pb.Resp } case *pb.RequestOp_RequestPut: if tv.RequestPut != nil { - resp, err := a.Put(txnID, tv.RequestPut) + resp, err := a.Put(txn, tv.RequestPut) if err != nil { plog.Panicf("unexpected error during txn: %v", err) } @@ -487,7 +450,7 @@ func (a *applierV3backend) applyUnion(txnID int64, union *pb.RequestOp) *pb.Resp } case *pb.RequestOp_RequestDeleteRange: if tv.RequestDeleteRange != nil { - resp, err := a.DeleteRange(txnID, tv.RequestDeleteRange) + resp, err := a.DeleteRange(txn, tv.RequestDeleteRange) if err != nil { plog.Panicf("unexpected error during txn: %v", err) } @@ -588,7 +551,7 @@ type applierV3Capped struct { // with Puts so that the number of keys in the store is capped. 
func newApplierV3Capped(base applierV3) applierV3 { return &applierV3Capped{applierV3: base} } -func (a *applierV3Capped) Put(txnID int64, p *pb.PutRequest) (*pb.PutResponse, error) { +func (a *applierV3Capped) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error) { return nil, ErrNoSpace } @@ -617,7 +580,7 @@ func (a *applierV3backend) AuthDisable() (*pb.AuthDisableResponse, error) { } func (a *applierV3backend) Authenticate(r *pb.InternalAuthenticateRequest) (*pb.AuthenticateResponse, error) { - ctx := context.WithValue(context.WithValue(context.Background(), "index", a.s.consistIndex.ConsistentIndex()), "simpleToken", r.SimpleToken) + ctx := context.WithValue(context.WithValue(a.s.ctx, "index", a.s.consistIndex.ConsistentIndex()), "simpleToken", r.SimpleToken) resp, err := a.s.AuthStore().Authenticate(ctx, r.Name, r.Password) if resp != nil { resp.Header = newHeader(a.s) @@ -738,9 +701,9 @@ func newQuotaApplierV3(s *EtcdServer, app applierV3) applierV3 { return "aApplierV3{app, NewBackendQuota(s)} } -func (a *quotaApplierV3) Put(txnID int64, p *pb.PutRequest) (*pb.PutResponse, error) { +func (a *quotaApplierV3) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error) { ok := a.q.Available(p) - resp, err := a.applierV3.Put(txnID, p) + resp, err := a.applierV3.Put(txn, p) if err == nil && !ok { err = ErrNoSpace } @@ -804,14 +767,27 @@ func (s *kvSortByValue) Less(i, j int) bool { return bytes.Compare(s.kvs[i].Value, s.kvs[j].Value) < 0 } -func (a *applierV3backend) checkRequestLeases(reqs []*pb.RequestOp) error { +func (a *applierV3backend) checkRequestPut(rv mvcc.ReadView, reqs []*pb.RequestOp) error { for _, requ := range reqs { tv, ok := requ.Request.(*pb.RequestOp_RequestPut) if !ok { continue } preq := tv.RequestPut - if preq == nil || lease.LeaseID(preq.Lease) == lease.NoLease { + if preq == nil { + continue + } + if preq.IgnoreValue || preq.IgnoreLease { + // expects previous key-value, error if not exist + rr, err := rv.Range(preq.Key, nil, mvcc.RangeOptions{}) + if err != nil { + return err + } + if rr == nil || len(rr.KVs) == 0 { + return ErrKeyNotFound + } + } + if lease.LeaseID(preq.Lease) == lease.NoLease { continue } if l := a.s.lessor.Lookup(lease.LeaseID(preq.Lease)); l == nil { @@ -821,7 +797,7 @@ func (a *applierV3backend) checkRequestLeases(reqs []*pb.RequestOp) error { return nil } -func (a *applierV3backend) checkRequestRange(reqs []*pb.RequestOp) error { +func checkRequestRange(rv mvcc.ReadView, reqs []*pb.RequestOp) error { for _, requ := range reqs { tv, ok := requ.Request.(*pb.RequestOp_RequestRange) if !ok { @@ -832,10 +808,10 @@ func (a *applierV3backend) checkRequestRange(reqs []*pb.RequestOp) error { continue } - if greq.Revision > a.s.KV().Rev() { + if greq.Revision > rv.Rev() { return mvcc.ErrFutureRev } - if greq.Revision < a.s.KV().FirstRev() { + if greq.Revision < rv.FirstRev() { return mvcc.ErrCompacted } } diff --git a/vendor/github.com/coreos/etcd/etcdserver/apply_auth.go b/vendor/github.com/coreos/etcd/etcdserver/apply_auth.go index 4868e855ca12..7da4ae45df5d 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/apply_auth.go +++ b/vendor/github.com/coreos/etcd/etcdserver/apply_auth.go @@ -19,6 +19,7 @@ import ( "github.com/coreos/etcd/auth" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "github.com/coreos/etcd/mvcc" ) type authApplierV3 struct { @@ -58,7 +59,7 @@ func (aa *authApplierV3) Apply(r *pb.InternalRaftRequest) *applyResult { return ret } -func (aa *authApplierV3) Put(txnID int64, r *pb.PutRequest) 
(*pb.PutResponse, error) { +func (aa *authApplierV3) Put(txn mvcc.TxnWrite, r *pb.PutRequest) (*pb.PutResponse, error) { if err := aa.as.IsPutPermitted(&aa.authInfo, r.Key); err != nil { return nil, err } @@ -68,17 +69,17 @@ func (aa *authApplierV3) Put(txnID int64, r *pb.PutRequest) (*pb.PutResponse, er return nil, err } } - return aa.applierV3.Put(txnID, r) + return aa.applierV3.Put(txn, r) } -func (aa *authApplierV3) Range(txnID int64, r *pb.RangeRequest) (*pb.RangeResponse, error) { +func (aa *authApplierV3) Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) { if err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, r.RangeEnd); err != nil { return nil, err } - return aa.applierV3.Range(txnID, r) + return aa.applierV3.Range(txn, r) } -func (aa *authApplierV3) DeleteRange(txnID int64, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { +func (aa *authApplierV3) DeleteRange(txn mvcc.TxnWrite, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { if err := aa.as.IsDeleteRangePermitted(&aa.authInfo, r.Key, r.RangeEnd); err != nil { return nil, err } @@ -89,7 +90,7 @@ func (aa *authApplierV3) DeleteRange(txnID int64, r *pb.DeleteRangeRequest) (*pb } } - return aa.applierV3.DeleteRange(txnID, r) + return aa.applierV3.DeleteRange(txn, r) } func checkTxnReqsPermission(as auth.AuthStore, ai *auth.AuthInfo, reqs []*pb.RequestOp) error { diff --git a/vendor/github.com/coreos/etcd/etcdserver/backend.go b/vendor/github.com/coreos/etcd/etcdserver/backend.go new file mode 100644 index 000000000000..c5e2dabf3e71 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/backend.go @@ -0,0 +1,81 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package etcdserver + +import ( + "fmt" + "os" + "time" + + "github.com/coreos/etcd/lease" + "github.com/coreos/etcd/mvcc" + "github.com/coreos/etcd/mvcc/backend" + "github.com/coreos/etcd/raft/raftpb" + "github.com/coreos/etcd/snap" +) + +func newBackend(cfg *ServerConfig) backend.Backend { + bcfg := backend.DefaultBackendConfig() + bcfg.Path = cfg.backendPath() + if cfg.QuotaBackendBytes > 0 && cfg.QuotaBackendBytes != DefaultQuotaBytes { + // permit 10% excess over quota for disarm + bcfg.MmapSize = uint64(cfg.QuotaBackendBytes + cfg.QuotaBackendBytes/10) + } + return backend.New(bcfg) +} + +// openSnapshotBackend renames a snapshot db to the current etcd db and opens it. +func openSnapshotBackend(cfg *ServerConfig, ss *snap.Snapshotter, snapshot raftpb.Snapshot) (backend.Backend, error) { + snapPath, err := ss.DBFilePath(snapshot.Metadata.Index) + if err != nil { + return nil, fmt.Errorf("database snapshot file path error: %v", err) + } + if err := os.Rename(snapPath, cfg.backendPath()); err != nil { + return nil, fmt.Errorf("rename snapshot file error: %v", err) + } + return openBackend(cfg), nil +} + +// openBackend returns a backend using the current etcd db. 
+func openBackend(cfg *ServerConfig) backend.Backend { + fn := cfg.backendPath() + beOpened := make(chan backend.Backend) + go func() { + beOpened <- newBackend(cfg) + }() + select { + case be := <-beOpened: + return be + case <-time.After(time.Second): + plog.Warningf("another etcd process is using %q and holds the file lock.", fn) + plog.Warningf("waiting for it to exit before starting...") + } + return <-beOpened +} + +// recoverSnapshotBackend recovers the DB from a snapshot in case etcd crashes +// before updating the backend db after persisting raft snapshot to disk, +// violating the invariant snapshot.Metadata.Index < db.consistentIndex. In this +// case, replace the db with the snapshot db sent by the leader. +func recoverSnapshotBackend(cfg *ServerConfig, oldbe backend.Backend, snapshot raftpb.Snapshot) (backend.Backend, error) { + var cIndex consistentIndex + kv := mvcc.New(oldbe, &lease.FakeLessor{}, &cIndex) + defer kv.Close() + if snapshot.Metadata.Index <= kv.ConsistentIndex() { + return oldbe, nil + } + oldbe.Close() + return openSnapshotBackend(cfg, snap.New(cfg.SnapDir()), snapshot) +} diff --git a/vendor/github.com/coreos/etcd/etcdserver/cluster_util.go b/vendor/github.com/coreos/etcd/etcdserver/cluster_util.go index fa84ffae6307..f44862a46380 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/cluster_util.go +++ b/vendor/github.com/coreos/etcd/etcdserver/cluster_util.go @@ -23,7 +23,6 @@ import ( "time" "github.com/coreos/etcd/etcdserver/membership" - "github.com/coreos/etcd/pkg/httputil" "github.com/coreos/etcd/pkg/types" "github.com/coreos/etcd/version" "github.com/coreos/go-semver/semver" @@ -241,15 +240,6 @@ func getVersion(m *membership.Member, rt http.RoundTripper) (*version.Versions, plog.Warningf("failed to reach the peerURL(%s) of member %s (%v)", u, m.ID, err) continue } - // etcd 2.0 does not have version endpoint on peer url. - if resp.StatusCode == http.StatusNotFound { - httputil.GracefulClose(resp) - return &version.Versions{ - Server: "2.0.0", - Cluster: "2.0.0", - }, nil - } - var b []byte b, err = ioutil.ReadAll(resp.Body) resp.Body.Close() diff --git a/vendor/github.com/coreos/etcd/etcdserver/config.go b/vendor/github.com/coreos/etcd/etcdserver/config.go index 9bcac0f076b9..ae8a4d08e355 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/config.go +++ b/vendor/github.com/coreos/etcd/etcdserver/config.go @@ -55,10 +55,17 @@ type ServerConfig struct { AutoCompactionRetention int QuotaBackendBytes int64 + // MaxRequestBytes is the maximum request size to send over raft. + MaxRequestBytes uint + StrictReconfigCheck bool // ClientCertAuthEnabled is true when cert has been signed by the client CA. 
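
The new MaxRequestBytes field above pairs with the v3rpc/grpc.go change earlier in this diff: the gRPC server's receive limit is the raft request cap plus a fixed 512 KiB of headroom for gRPC and proto framing. A tiny self-contained sketch of that arithmetic (the constant is copied from the diff; the 1.5 MiB cap is only an example value):

package main

import "fmt"

// grpcOverheadBytes mirrors the headroom constant in v3rpc/grpc.go.
const grpcOverheadBytes = 512 * 1024

// maxRecvMsgSize computes the server's effective gRPC receive limit.
func maxRecvMsgSize(maxRequestBytes uint) int {
	return int(maxRequestBytes) + grpcOverheadBytes
}

func main() {
	// With an example 1.5 MiB request cap, the server accepts up to 2 MiB.
	fmt.Println(maxRecvMsgSize(1536 * 1024)) // 2097152
}
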
ClientCertAuthEnabled bool + + AuthToken string + + Debug bool } // VerifyBootstrap sanity-checks the initial config for bootstrap case @@ -198,3 +205,5 @@ func (c *ServerConfig) bootstrapTimeout() time.Duration { } return time.Second } + +func (c *ServerConfig) backendPath() string { return filepath.Join(c.SnapDir(), "db") } diff --git a/vendor/github.com/coreos/etcd/etcdserver/errors.go b/vendor/github.com/coreos/etcd/etcdserver/errors.go index 5edc155624bc..ed749dbe8d88 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/errors.go +++ b/vendor/github.com/coreos/etcd/etcdserver/errors.go @@ -33,6 +33,7 @@ var ( ErrNoSpace = errors.New("etcdserver: no space") ErrTooManyRequests = errors.New("etcdserver: too many requests") ErrUnhealthy = errors.New("etcdserver: unhealthy cluster") + ErrKeyNotFound = errors.New("etcdserver: key not found") ) type DiscoveryError struct { diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go index f34bedf3ed3a..aabf90061f6e 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go +++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go @@ -1018,7 +1018,7 @@ func init() { proto.RegisterFile("etcdserver.proto", fileDescriptorEtcdserver) } var fileDescriptorEtcdserver = []byte{ // 380 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x5c, 0xd2, 0xdd, 0x6e, 0xda, 0x30, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0xd2, 0xdd, 0x6e, 0xda, 0x30, 0x14, 0x07, 0x70, 0x0c, 0xe1, 0xcb, 0x63, 0x1b, 0xb3, 0xd0, 0x74, 0x84, 0xa6, 0x2c, 0x42, 0xbb, 0xc8, 0xd5, 0xf6, 0x0e, 0x2c, 0x5c, 0x44, 0x2a, 0x15, 0x0d, 0x15, 0xbd, 0x76, 0xc9, 0x29, 0x58, 0x02, 0x4c, 0x1d, 0x07, 0xf1, 0x06, 0x7d, 0x85, 0x3e, 0x12, 0x97, 0x7d, 0x82, 0xaa, 0xa5, 0x2f, diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/gw/rpc.pb.gw.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/gw/rpc.pb.gw.go new file mode 100644 index 000000000000..02a23b78c107 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/gw/rpc.pb.gw.go @@ -0,0 +1,1996 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: etcdserver/etcdserverpb/rpc.proto + +/* +Package etcdserverpb is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. 
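
For instance, the lock service defined earlier in this diff maps POST /v3alpha/lock/lock onto Lock. Assuming the gateway is mounted on the client port, as etcd's embedded server does for these /v3alpha/ paths, and recalling that proto bytes fields travel base64-encoded in the JSON mapping, a plain-HTTP call looks roughly like this (endpoint and lock name are illustrative):

package main

import (
	"bytes"
	"encoding/base64"
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
)

func main() {
	// LockRequest.name is a bytes field, so it is base64-encoded in JSON.
	name := base64.StdEncoding.EncodeToString([]byte("my-lock"))
	body := []byte(fmt.Sprintf(`{"name":%q}`, name))

	resp, err := http.Post("http://127.0.0.1:2379/v3alpha/lock/lock",
		"application/json", bytes.NewReader(body))
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	out, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(string(out)) // JSON LockResponse with header and base64 key
}
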
+*/ +package gw + +import ( + "github.com/coreos/etcd/etcdserver/etcdserverpb" + "io" + "net/http" + + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray + +func request_KV_Range_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.RangeRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Range(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_KV_Put_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.PutRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Put(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_KV_DeleteRange_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.DeleteRangeRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.DeleteRange(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_KV_Txn_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.TxnRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Txn(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_KV_Compact_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.CompactionRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Compact(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Watch_Watch_0(ctx context.Context, marshaler runtime.Marshaler, client 
etcdserverpb.WatchClient, req *http.Request, pathParams map[string]string) (etcdserverpb.Watch_WatchClient, runtime.ServerMetadata, error) { + var metadata runtime.ServerMetadata + stream, err := client.Watch(ctx) + if err != nil { + grpclog.Printf("Failed to start streaming: %v", err) + return nil, metadata, err + } + dec := marshaler.NewDecoder(req.Body) + handleSend := func() error { + var protoReq etcdserverpb.WatchRequest + err = dec.Decode(&protoReq) + if err == io.EOF { + return err + } + if err != nil { + grpclog.Printf("Failed to decode request: %v", err) + return err + } + if err = stream.Send(&protoReq); err != nil { + grpclog.Printf("Failed to send request: %v", err) + return err + } + return nil + } + if err := handleSend(); err != nil { + if cerr := stream.CloseSend(); cerr != nil { + grpclog.Printf("Failed to terminate client stream: %v", cerr) + } + if err == io.EOF { + return stream, metadata, nil + } + return nil, metadata, err + } + go func() { + for { + if err := handleSend(); err != nil { + break + } + } + if err := stream.CloseSend(); err != nil { + grpclog.Printf("Failed to terminate client stream: %v", err) + } + }() + header, err := stream.Header() + if err != nil { + grpclog.Printf("Failed to get header from client: %v", err) + return nil, metadata, err + } + metadata.HeaderMD = header + return stream, metadata, nil +} + +func request_Lease_LeaseGrant_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.LeaseGrantRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.LeaseGrant(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Lease_LeaseRevoke_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.LeaseRevokeRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.LeaseRevoke(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Lease_LeaseKeepAlive_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (etcdserverpb.Lease_LeaseKeepAliveClient, runtime.ServerMetadata, error) { + var metadata runtime.ServerMetadata + stream, err := client.LeaseKeepAlive(ctx) + if err != nil { + grpclog.Printf("Failed to start streaming: %v", err) + return nil, metadata, err + } + dec := marshaler.NewDecoder(req.Body) + handleSend := func() error { + var protoReq etcdserverpb.LeaseKeepAliveRequest + err = dec.Decode(&protoReq) + if err == io.EOF { + return err + } + if err != nil { + grpclog.Printf("Failed to decode request: %v", err) + return err + } + if err = stream.Send(&protoReq); err != nil { + grpclog.Printf("Failed to send request: %v", err) + return err + } + return nil + } + if err := handleSend(); err != nil { + if cerr := stream.CloseSend(); cerr != nil { + grpclog.Printf("Failed to terminate client 
stream: %v", cerr) + } + if err == io.EOF { + return stream, metadata, nil + } + return nil, metadata, err + } + go func() { + for { + if err := handleSend(); err != nil { + break + } + } + if err := stream.CloseSend(); err != nil { + grpclog.Printf("Failed to terminate client stream: %v", err) + } + }() + header, err := stream.Header() + if err != nil { + grpclog.Printf("Failed to get header from client: %v", err) + return nil, metadata, err + } + metadata.HeaderMD = header + return stream, metadata, nil +} + +func request_Lease_LeaseTimeToLive_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.LeaseTimeToLiveRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.LeaseTimeToLive(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Cluster_MemberAdd_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.MemberAddRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.MemberAdd(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Cluster_MemberRemove_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.MemberRemoveRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.MemberRemove(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Cluster_MemberUpdate_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.MemberUpdateRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.MemberUpdate(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Cluster_MemberList_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.MemberListRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.MemberList(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, 
metadata, err + +} + +func request_Maintenance_Alarm_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AlarmRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Alarm(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Maintenance_Status_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.StatusRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Status(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Maintenance_Defragment_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.DefragmentRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Defragment(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Maintenance_Hash_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.HashRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Hash(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Maintenance_Snapshot_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (etcdserverpb.Maintenance_SnapshotClient, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.SnapshotRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + stream, err := client.Snapshot(ctx, &protoReq) + if err != nil { + return nil, metadata, err + } + header, err := stream.Header() + if err != nil { + return nil, metadata, err + } + metadata.HeaderMD = header + return stream, metadata, nil + +} + +func request_Auth_AuthEnable_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AuthEnableRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + 
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.AuthEnable(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Auth_AuthDisable_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AuthDisableRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.AuthDisable(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Auth_Authenticate_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AuthenticateRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Authenticate(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Auth_UserAdd_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AuthUserAddRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.UserAdd(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Auth_UserGet_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AuthUserGetRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.UserGet(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Auth_UserList_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AuthUserListRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.UserList(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Auth_UserDelete_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AuthUserDeleteRequest + var metadata runtime.ServerMetadata + + if err := 
marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.UserDelete(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Auth_UserChangePassword_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AuthUserChangePasswordRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.UserChangePassword(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Auth_UserGrantRole_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AuthUserGrantRoleRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.UserGrantRole(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Auth_UserRevokeRole_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AuthUserRevokeRoleRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.UserRevokeRole(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Auth_RoleAdd_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AuthRoleAddRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.RoleAdd(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Auth_RoleGet_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AuthRoleGetRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.RoleGet(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Auth_RoleList_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, 
error) { + var protoReq etcdserverpb.AuthRoleListRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.RoleList(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Auth_RoleDelete_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AuthRoleDeleteRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.RoleDelete(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Auth_RoleGrantPermission_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AuthRoleGrantPermissionRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.RoleGrantPermission(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func request_Auth_RoleRevokePermission_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq etcdserverpb.AuthRoleRevokePermissionRequest + var metadata runtime.ServerMetadata + + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.RoleRevokePermission(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +// RegisterKVHandlerFromEndpoint is same as RegisterKVHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterKVHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterKVHandler(ctx, mux, conn) +} + +// RegisterKVHandler registers the http handlers for service KV to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterKVHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterKVHandlerClient(ctx, mux, etcdserverpb.NewKVClient(conn)) +} + +// RegisterKVHandler registers the http handlers for service KV to "mux". +// The handlers forward requests to the grpc endpoint over the given implementation of "KVClient". 
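+// (Editorial note, not generated text: request and response bodies use the
+// proto3 JSON mapping of the etcdserverpb messages, so bytes fields such as
+// RangeRequest.key and PutRequest.value travel base64-encoded.)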
+// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "KVClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "KVClient" to call the correct interceptors. +func RegisterKVHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.KVClient) error { + + mux.Handle("POST", pattern_KV_Range_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_KV_Range_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_KV_Range_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_KV_Put_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_KV_Put_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_KV_Put_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_KV_DeleteRange_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_KV_DeleteRange_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_KV_DeleteRange_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
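+		// (editorial) Every unary handler in this file follows the same shape:
+		// tie ctx cancellation to client disconnect via http.CloseNotifier,
+		// decode the JSON body into the request proto, invoke the gRPC client,
+		// and forward the response; a body that fails to decode surfaces as
+		// codes.InvalidArgument, which runtime.HTTPError writes as HTTP 400.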
+ + }) + + mux.Handle("POST", pattern_KV_Txn_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_KV_Txn_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_KV_Txn_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_KV_Compact_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_KV_Compact_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_KV_Compact_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +var ( + pattern_KV_Range_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "kv", "range"}, "")) + + pattern_KV_Put_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "kv", "put"}, "")) + + pattern_KV_DeleteRange_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "kv", "deleterange"}, "")) + + pattern_KV_Txn_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "kv", "txn"}, "")) + + pattern_KV_Compact_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "kv", "compaction"}, "")) +) + +var ( + forward_KV_Range_0 = runtime.ForwardResponseMessage + + forward_KV_Put_0 = runtime.ForwardResponseMessage + + forward_KV_DeleteRange_0 = runtime.ForwardResponseMessage + + forward_KV_Txn_0 = runtime.ForwardResponseMessage + + forward_KV_Compact_0 = runtime.ForwardResponseMessage +) + +// RegisterWatchHandlerFromEndpoint is same as RegisterWatchHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterWatchHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) 
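+	// (editorial) grpc.Dial is non-blocking unless grpc.WithBlock is included
+	// in opts, so most connection failures surface on the first proxied RPC
+	// rather than as a non-nil err here.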
+ if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterWatchHandler(ctx, mux, conn) +} + +// RegisterWatchHandler registers the http handlers for service Watch to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterWatchHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterWatchHandlerClient(ctx, mux, etcdserverpb.NewWatchClient(conn)) +} + +// RegisterWatchHandler registers the http handlers for service Watch to "mux". +// The handlers forward requests to the grpc endpoint over the given implementation of "WatchClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "WatchClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "WatchClient" to call the correct interceptors. +func RegisterWatchHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.WatchClient) error { + + mux.Handle("POST", pattern_Watch_Watch_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Watch_Watch_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Watch_Watch_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +var ( + pattern_Watch_Watch_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v3alpha", "watch"}, "")) +) + +var ( + forward_Watch_Watch_0 = runtime.ForwardResponseStream +) + +// RegisterLeaseHandlerFromEndpoint is same as RegisterLeaseHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterLeaseHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterLeaseHandler(ctx, mux, conn) +} + +// RegisterLeaseHandler registers the http handlers for service Lease to "mux". +// The handlers forward requests to the grpc endpoint over "conn". 
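+// (Editorial note: LeaseKeepAlive is a bidirectional stream; through this
+// gateway it becomes a single POST whose request body is decoded as a
+// sequence of JSON messages and whose responses are streamed back by
+// runtime.ForwardResponseStream.)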
+func RegisterLeaseHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterLeaseHandlerClient(ctx, mux, etcdserverpb.NewLeaseClient(conn)) +} + +// RegisterLeaseHandler registers the http handlers for service Lease to "mux". +// The handlers forward requests to the grpc endpoint over the given implementation of "LeaseClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "LeaseClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "LeaseClient" to call the correct interceptors. +func RegisterLeaseHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.LeaseClient) error { + + mux.Handle("POST", pattern_Lease_LeaseGrant_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Lease_LeaseGrant_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Lease_LeaseGrant_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_Lease_LeaseRevoke_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Lease_LeaseRevoke_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Lease_LeaseRevoke_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("POST", pattern_Lease_LeaseKeepAlive_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Lease_LeaseKeepAlive_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Lease_LeaseKeepAlive_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_Lease_LeaseTimeToLive_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Lease_LeaseTimeToLive_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Lease_LeaseTimeToLive_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +var ( + pattern_Lease_LeaseGrant_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "lease", "grant"}, "")) + + pattern_Lease_LeaseRevoke_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "kv", "lease", "revoke"}, "")) + + pattern_Lease_LeaseKeepAlive_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "lease", "keepalive"}, "")) + + pattern_Lease_LeaseTimeToLive_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "kv", "lease", "timetolive"}, "")) +) + +var ( + forward_Lease_LeaseGrant_0 = runtime.ForwardResponseMessage + + forward_Lease_LeaseRevoke_0 = runtime.ForwardResponseMessage + + forward_Lease_LeaseKeepAlive_0 = runtime.ForwardResponseStream + + forward_Lease_LeaseTimeToLive_0 = runtime.ForwardResponseMessage +) + +// RegisterClusterHandlerFromEndpoint is same as RegisterClusterHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterClusterHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) 
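+	// (editorial) Note the named return value err: the deferred closure below
+	// closes conn immediately if registration fails, and otherwise hands the
+	// connection to a goroutine that closes it once ctx is done.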
+ if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterClusterHandler(ctx, mux, conn) +} + +// RegisterClusterHandler registers the http handlers for service Cluster to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterClusterHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterClusterHandlerClient(ctx, mux, etcdserverpb.NewClusterClient(conn)) +} + +// RegisterClusterHandler registers the http handlers for service Cluster to "mux". +// The handlers forward requests to the grpc endpoint over the given implementation of "ClusterClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "ClusterClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "ClusterClient" to call the correct interceptors. +func RegisterClusterHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.ClusterClient) error { + + mux.Handle("POST", pattern_Cluster_MemberAdd_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Cluster_MemberAdd_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Cluster_MemberAdd_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_Cluster_MemberRemove_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Cluster_MemberRemove_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Cluster_MemberRemove_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
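+		// (editorial) mux.GetForwardResponseOptions() returns any response
+		// mutators installed via runtime.WithForwardResponseOption when the
+		// ServeMux was constructed; they run before the JSON reply is written.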
+ + }) + + mux.Handle("POST", pattern_Cluster_MemberUpdate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Cluster_MemberUpdate_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Cluster_MemberUpdate_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_Cluster_MemberList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Cluster_MemberList_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Cluster_MemberList_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +var ( + pattern_Cluster_MemberAdd_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "cluster", "member", "add"}, "")) + + pattern_Cluster_MemberRemove_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "cluster", "member", "remove"}, "")) + + pattern_Cluster_MemberUpdate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "cluster", "member", "update"}, "")) + + pattern_Cluster_MemberList_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "cluster", "member", "list"}, "")) +) + +var ( + forward_Cluster_MemberAdd_0 = runtime.ForwardResponseMessage + + forward_Cluster_MemberRemove_0 = runtime.ForwardResponseMessage + + forward_Cluster_MemberUpdate_0 = runtime.ForwardResponseMessage + + forward_Cluster_MemberList_0 = runtime.ForwardResponseMessage +) + +// RegisterMaintenanceHandlerFromEndpoint is same as RegisterMaintenanceHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterMaintenanceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) 
+ if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterMaintenanceHandler(ctx, mux, conn) +} + +// RegisterMaintenanceHandler registers the http handlers for service Maintenance to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterMaintenanceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterMaintenanceHandlerClient(ctx, mux, etcdserverpb.NewMaintenanceClient(conn)) +} + +// RegisterMaintenanceHandler registers the http handlers for service Maintenance to "mux". +// The handlers forward requests to the grpc endpoint over the given implementation of "MaintenanceClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "MaintenanceClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "MaintenanceClient" to call the correct interceptors. +func RegisterMaintenanceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.MaintenanceClient) error { + + mux.Handle("POST", pattern_Maintenance_Alarm_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Maintenance_Alarm_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Maintenance_Alarm_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_Maintenance_Status_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Maintenance_Status_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Maintenance_Status_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
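+		// (editorial) runtime.AnnotateContext (called above) maps selected
+		// inbound HTTP headers, such as Authorization and Grpc-Timeout, onto
+		// the outgoing gRPC metadata and deadline for the proxied call.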
+ + }) + + mux.Handle("POST", pattern_Maintenance_Defragment_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Maintenance_Defragment_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Maintenance_Defragment_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_Maintenance_Hash_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Maintenance_Hash_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Maintenance_Hash_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_Maintenance_Snapshot_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Maintenance_Snapshot_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Maintenance_Snapshot_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) 
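+		// (editorial) Snapshot is server-streaming: passing a closure over
+		// resp.Recv() lets runtime.ForwardResponseStream flush each snapshot
+		// chunk to the HTTP client as it arrives instead of buffering it all.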
+ + }) + + return nil +} + +var ( + pattern_Maintenance_Alarm_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "maintenance", "alarm"}, "")) + + pattern_Maintenance_Status_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "maintenance", "status"}, "")) + + pattern_Maintenance_Defragment_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "maintenance", "defragment"}, "")) + + pattern_Maintenance_Hash_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "maintenance", "hash"}, "")) + + pattern_Maintenance_Snapshot_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "maintenance", "snapshot"}, "")) +) + +var ( + forward_Maintenance_Alarm_0 = runtime.ForwardResponseMessage + + forward_Maintenance_Status_0 = runtime.ForwardResponseMessage + + forward_Maintenance_Defragment_0 = runtime.ForwardResponseMessage + + forward_Maintenance_Hash_0 = runtime.ForwardResponseMessage + + forward_Maintenance_Snapshot_0 = runtime.ForwardResponseStream +) + +// RegisterAuthHandlerFromEndpoint is same as RegisterAuthHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterAuthHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterAuthHandler(ctx, mux, conn) +} + +// RegisterAuthHandler registers the http handlers for service Auth to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterAuthHandlerClient(ctx, mux, etcdserverpb.NewAuthClient(conn)) +} + +// RegisterAuthHandler registers the http handlers for service Auth to "mux". +// The handlers forward requests to the grpc endpoint over the given implementation of "AuthClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "AuthClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "AuthClient" to call the correct interceptors. 
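+// (Editorial note: AuthenticateRequest carries the user's password in the
+// JSON request body, so a deployment exposing these routes should normally
+// serve the gateway over TLS.)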
+func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.AuthClient) error { + + mux.Handle("POST", pattern_Auth_AuthEnable_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Auth_AuthEnable_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Auth_AuthEnable_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_Auth_AuthDisable_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Auth_AuthDisable_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Auth_AuthDisable_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_Auth_Authenticate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Auth_Authenticate_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Auth_Authenticate_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("POST", pattern_Auth_UserAdd_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Auth_UserAdd_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Auth_UserAdd_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_Auth_UserGet_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Auth_UserGet_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Auth_UserGet_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_Auth_UserList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Auth_UserList_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Auth_UserList_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("POST", pattern_Auth_UserDelete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Auth_UserDelete_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Auth_UserDelete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_Auth_UserChangePassword_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Auth_UserChangePassword_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Auth_UserChangePassword_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_Auth_UserGrantRole_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Auth_UserGrantRole_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Auth_UserGrantRole_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("POST", pattern_Auth_UserRevokeRole_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Auth_UserRevokeRole_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Auth_UserRevokeRole_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_Auth_RoleAdd_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Auth_RoleAdd_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Auth_RoleAdd_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_Auth_RoleGet_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Auth_RoleGet_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Auth_RoleGet_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("POST", pattern_Auth_RoleList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Auth_RoleList_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Auth_RoleList_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_Auth_RoleDelete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Auth_RoleDelete_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Auth_RoleDelete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("POST", pattern_Auth_RoleGrantPermission_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Auth_RoleGrantPermission_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Auth_RoleGrantPermission_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("POST", pattern_Auth_RoleRevokePermission_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if cn, ok := w.(http.CloseNotifier); ok { + go func(done <-chan struct{}, closed <-chan bool) { + select { + case <-done: + case <-closed: + cancel() + } + }(ctx.Done(), cn.CloseNotify()) + } + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Auth_RoleRevokePermission_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Auth_RoleRevokePermission_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +var ( + pattern_Auth_AuthEnable_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "auth", "enable"}, "")) + + pattern_Auth_AuthDisable_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "auth", "disable"}, "")) + + pattern_Auth_Authenticate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "auth", "authenticate"}, "")) + + pattern_Auth_UserAdd_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "auth", "user", "add"}, "")) + + pattern_Auth_UserGet_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "auth", "user", "get"}, "")) + + pattern_Auth_UserList_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "auth", "user", "list"}, "")) + + pattern_Auth_UserDelete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "auth", "user", "delete"}, "")) + + pattern_Auth_UserChangePassword_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "auth", "user", "changepw"}, "")) + + pattern_Auth_UserGrantRole_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "auth", "user", "grant"}, "")) + + pattern_Auth_UserRevokeRole_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "auth", "user", "revoke"}, "")) + + pattern_Auth_RoleAdd_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "auth", "role", "add"}, "")) + + pattern_Auth_RoleGet_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "auth", "role", "get"}, "")) + + pattern_Auth_RoleList_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "auth", "role", "list"}, "")) + + pattern_Auth_RoleDelete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "auth", "role", "delete"}, "")) + + pattern_Auth_RoleGrantPermission_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "auth", "role", "grant"}, "")) + + pattern_Auth_RoleRevokePermission_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "auth", "role", "revoke"}, "")) +) + +var ( + forward_Auth_AuthEnable_0 = 
runtime.ForwardResponseMessage + + forward_Auth_AuthDisable_0 = runtime.ForwardResponseMessage + + forward_Auth_Authenticate_0 = runtime.ForwardResponseMessage + + forward_Auth_UserAdd_0 = runtime.ForwardResponseMessage + + forward_Auth_UserGet_0 = runtime.ForwardResponseMessage + + forward_Auth_UserList_0 = runtime.ForwardResponseMessage + + forward_Auth_UserDelete_0 = runtime.ForwardResponseMessage + + forward_Auth_UserChangePassword_0 = runtime.ForwardResponseMessage + + forward_Auth_UserGrantRole_0 = runtime.ForwardResponseMessage + + forward_Auth_UserRevokeRole_0 = runtime.ForwardResponseMessage + + forward_Auth_RoleAdd_0 = runtime.ForwardResponseMessage + + forward_Auth_RoleGet_0 = runtime.ForwardResponseMessage + + forward_Auth_RoleList_0 = runtime.ForwardResponseMessage + + forward_Auth_RoleDelete_0 = runtime.ForwardResponseMessage + + forward_Auth_RoleGrantPermission_0 = runtime.ForwardResponseMessage + + forward_Auth_RoleRevokePermission_0 = runtime.ForwardResponseMessage +) diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go index 66890c93c440..44a3b6f69eb9 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go +++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go @@ -2038,7 +2038,7 @@ func init() { proto.RegisterFile("raft_internal.proto", fileDescriptorRaftIntern var fileDescriptorRaftInternal = []byte{ // 837 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x7c, 0x96, 0xdd, 0x52, 0xd3, 0x40, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x96, 0xdd, 0x52, 0xd3, 0x40, 0x14, 0xc7, 0x69, 0xf9, 0xec, 0xb6, 0x54, 0x5c, 0x40, 0xd7, 0x32, 0x53, 0xa1, 0x8c, 0x8a, 0x5f, 0xe8, 0x94, 0x07, 0xd0, 0x4a, 0x19, 0x60, 0x86, 0x61, 0x98, 0x0c, 0xce, 0x38, 0xe3, 0x45, 0x5c, 0x9a, 0x43, 0x1b, 0x49, 0x93, 0xb8, 0xd9, 0x56, 0x7c, 0x13, 0x1f, 0xc3, 0xaf, 0x87, 0xe0, 0xc2, diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go index b28f2e50e3c4..894c815f824c 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go +++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go @@ -15,6 +15,8 @@ import ( authpb "github.com/coreos/etcd/auth/authpb" + _ "google.golang.org/genproto/googleapis/api/annotations" + context "golang.org/x/net/context" grpc "google.golang.org/grpc" @@ -223,16 +225,45 @@ func (m *ResponseHeader) String() string { return proto.CompactTextSt func (*ResponseHeader) ProtoMessage() {} func (*ResponseHeader) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{0} } +func (m *ResponseHeader) GetClusterId() uint64 { + if m != nil { + return m.ClusterId + } + return 0 +} + +func (m *ResponseHeader) GetMemberId() uint64 { + if m != nil { + return m.MemberId + } + return 0 +} + +func (m *ResponseHeader) GetRevision() int64 { + if m != nil { + return m.Revision + } + return 0 +} + +func (m *ResponseHeader) GetRaftTerm() uint64 { + if m != nil { + return m.RaftTerm + } + return 0 +} + type RangeRequest struct { // key is the first key for the range. If range_end is not given, the request only looks up key. Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` // range_end is the upper bound on the requested range [key, range_end). // If range_end is '\0', the range is all keys >= key. 
- // If the range_end is one bit larger than the given key, - // then the range requests get the all keys with the prefix (the given key). - // If both key and range_end are '\0', then range requests returns all keys. + // If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"), + // then the range request gets all keys prefixed with key. + // If both key and range_end are '\0', then the range request returns all keys. RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` - // limit is a limit on the number of keys returned for the request. + // limit is a limit on the number of keys returned for the request. When limit is set to 0, + // it is treated as no limit. Limit int64 `protobuf:"varint,3,opt,name=limit,proto3" json:"limit,omitempty"` // revision is the point-in-time of the key-value store to use for the range. // If revision is less or equal to zero, the range is over the newest key-value store. @@ -272,6 +303,97 @@ func (m *RangeRequest) String() string { return proto.CompactTextStri func (*RangeRequest) ProtoMessage() {} func (*RangeRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{1} } +func (m *RangeRequest) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *RangeRequest) GetRangeEnd() []byte { + if m != nil { + return m.RangeEnd + } + return nil +} + +func (m *RangeRequest) GetLimit() int64 { + if m != nil { + return m.Limit + } + return 0 +} + +func (m *RangeRequest) GetRevision() int64 { + if m != nil { + return m.Revision + } + return 0 +} + +func (m *RangeRequest) GetSortOrder() RangeRequest_SortOrder { + if m != nil { + return m.SortOrder + } + return RangeRequest_NONE +} + +func (m *RangeRequest) GetSortTarget() RangeRequest_SortTarget { + if m != nil { + return m.SortTarget + } + return RangeRequest_KEY +} + +func (m *RangeRequest) GetSerializable() bool { + if m != nil { + return m.Serializable + } + return false +} + +func (m *RangeRequest) GetKeysOnly() bool { + if m != nil { + return m.KeysOnly + } + return false +} + +func (m *RangeRequest) GetCountOnly() bool { + if m != nil { + return m.CountOnly + } + return false +} + +func (m *RangeRequest) GetMinModRevision() int64 { + if m != nil { + return m.MinModRevision + } + return 0 +} + +func (m *RangeRequest) GetMaxModRevision() int64 { + if m != nil { + return m.MaxModRevision + } + return 0 +} + +func (m *RangeRequest) GetMinCreateRevision() int64 { + if m != nil { + return m.MinCreateRevision + } + return 0 +} + +func (m *RangeRequest) GetMaxCreateRevision() int64 { + if m != nil { + return m.MaxCreateRevision + } + return 0 +} + type RangeResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // kvs is the list of key-value pairs matched by the range request. @@ -302,6 +424,20 @@ func (m *RangeResponse) GetKvs() []*mvccpb.KeyValue { return nil } +func (m *RangeResponse) GetMore() bool { + if m != nil { + return m.More + } + return false +} + +func (m *RangeResponse) GetCount() int64 { + if m != nil { + return m.Count + } + return 0 +} + type PutRequest struct { // key is the key, in bytes, to put into the key-value store. Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` @@ -313,6 +449,12 @@ type PutRequest struct { // If prev_kv is set, etcd gets the previous key-value pair before changing it. // The previous key-value pair will be returned in the put response. 
PrevKv bool `protobuf:"varint,4,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"` + // If ignore_value is set, etcd updates the key using its current value. + // Returns an error if the key does not exist. + IgnoreValue bool `protobuf:"varint,5,opt,name=ignore_value,json=ignoreValue,proto3" json:"ignore_value,omitempty"` + // If ignore_lease is set, etcd updates the key using its current lease. + // Returns an error if the key does not exist. + IgnoreLease bool `protobuf:"varint,6,opt,name=ignore_lease,json=ignoreLease,proto3" json:"ignore_lease,omitempty"` } func (m *PutRequest) Reset() { *m = PutRequest{} } @@ -320,6 +462,48 @@ func (m *PutRequest) String() string { return proto.CompactTextString func (*PutRequest) ProtoMessage() {} func (*PutRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{3} } +func (m *PutRequest) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *PutRequest) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *PutRequest) GetLease() int64 { + if m != nil { + return m.Lease + } + return 0 +} + +func (m *PutRequest) GetPrevKv() bool { + if m != nil { + return m.PrevKv + } + return false +} + +func (m *PutRequest) GetIgnoreValue() bool { + if m != nil { + return m.IgnoreValue + } + return false +} + +func (m *PutRequest) GetIgnoreLease() bool { + if m != nil { + return m.IgnoreLease + } + return false +} + type PutResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // if prev_kv is set in the request, the previous key-value pair will be returned. @@ -350,12 +534,12 @@ type DeleteRangeRequest struct { Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` // range_end is the key following the last key to delete for the range [key, range_end). // If range_end is not given, the range is defined to contain only the key argument. - // If range_end is one bit larger than the given key, then the range is all - // the all keys with the prefix (the given key). + // If range_end is one bit larger than the given key, then the range is all the keys + // with the prefix (the given key). // If range_end is '\0', the range is all keys greater than or equal to the key argument. RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"` // If prev_kv is set, etcd gets the previous key-value pairs before deleting it. - // The previous key-value pairs will be returned in the delte response. + // The previous key-value pairs will be returned in the delete response. PrevKv bool `protobuf:"varint,3,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"` } @@ -364,6 +548,27 @@ func (m *DeleteRangeRequest) String() string { return proto.CompactTe func (*DeleteRangeRequest) ProtoMessage() {} func (*DeleteRangeRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{5} } +func (m *DeleteRangeRequest) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *DeleteRangeRequest) GetRangeEnd() []byte { + if m != nil { + return m.RangeEnd + } + return nil +} + +func (m *DeleteRangeRequest) GetPrevKv() bool { + if m != nil { + return m.PrevKv + } + return false +} + type DeleteRangeResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // deleted is the number of keys deleted by the delete range request. 
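The rewritten range_end comments in the hunks above pin down the prefix convention exactly: the end of a prefix range is the key with its last non-0xff byte incremented and any trailing 0xff bytes dropped. A minimal, self-contained sketch of that "key plus one" computation follows; prefixRangeEnd is a hypothetical helper written for illustration only, not a function added by this diff (etcd's clientv3 package carries the real client-side equivalent).

package main

import "fmt"

// prefixRangeEnd mirrors the documented semantics: "aa"+1 == "ab",
// "a\xff"+1 == "b". A key consisting only of 0xff bytes has no finite
// upper bound, which the RPC layer represents as "\0" (all keys >= key).
func prefixRangeEnd(key []byte) []byte {
	end := make([]byte, len(key))
	copy(end, key)
	for i := len(end) - 1; i >= 0; i-- {
		if end[i] < 0xff {
			end[i]++
			return end[:i+1]
		}
	}
	return []byte{0}
}

func main() {
	fmt.Printf("%q\n", prefixRangeEnd([]byte("aa")))    // "ab"
	fmt.Printf("%q\n", prefixRangeEnd([]byte("a\xff"))) // "b"
}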
@@ -384,6 +589,13 @@ func (m *DeleteRangeResponse) GetHeader() *ResponseHeader { return nil } +func (m *DeleteRangeResponse) GetDeleted() int64 { + if m != nil { + return m.Deleted + } + return 0 +} + func (m *DeleteRangeResponse) GetPrevKvs() []*mvccpb.KeyValue { if m != nil { return m.PrevKvs @@ -754,6 +966,27 @@ func (m *Compare) GetTargetUnion() isCompare_TargetUnion { return nil } +func (m *Compare) GetResult() Compare_CompareResult { + if m != nil { + return m.Result + } + return Compare_EQUAL +} + +func (m *Compare) GetTarget() Compare_CompareTarget { + if m != nil { + return m.Target + } + return Compare_VERSION +} + +func (m *Compare) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + func (m *Compare) GetVersion() int64 { if x, ok := m.GetTargetUnion().(*Compare_Version); ok { return x.Version @@ -950,6 +1183,13 @@ func (m *TxnResponse) GetHeader() *ResponseHeader { return nil } +func (m *TxnResponse) GetSucceeded() bool { + if m != nil { + return m.Succeeded + } + return false +} + func (m *TxnResponse) GetResponses() []*ResponseOp { if m != nil { return m.Responses @@ -973,6 +1213,20 @@ func (m *CompactionRequest) String() string { return proto.CompactTex func (*CompactionRequest) ProtoMessage() {} func (*CompactionRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{12} } +func (m *CompactionRequest) GetRevision() int64 { + if m != nil { + return m.Revision + } + return 0 +} + +func (m *CompactionRequest) GetPhysical() bool { + if m != nil { + return m.Physical + } + return false +} + type CompactionResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } @@ -1015,6 +1269,13 @@ func (m *HashResponse) GetHeader() *ResponseHeader { return nil } +func (m *HashResponse) GetHash() uint32 { + if m != nil { + return m.Hash + } + return 0 +} + type SnapshotRequest struct { } @@ -1045,6 +1306,20 @@ func (m *SnapshotResponse) GetHeader() *ResponseHeader { return nil } +func (m *SnapshotResponse) GetRemainingBytes() uint64 { + if m != nil { + return m.RemainingBytes + } + return 0 +} + +func (m *SnapshotResponse) GetBlob() []byte { + if m != nil { + return m.Blob + } + return nil +} + type WatchRequest struct { // request_union is a request to either create a new watcher or cancel an existing watcher. // @@ -1198,6 +1473,48 @@ func (m *WatchCreateRequest) String() string { return proto.CompactTe func (*WatchCreateRequest) ProtoMessage() {} func (*WatchCreateRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{19} } +func (m *WatchCreateRequest) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *WatchCreateRequest) GetRangeEnd() []byte { + if m != nil { + return m.RangeEnd + } + return nil +} + +func (m *WatchCreateRequest) GetStartRevision() int64 { + if m != nil { + return m.StartRevision + } + return 0 +} + +func (m *WatchCreateRequest) GetProgressNotify() bool { + if m != nil { + return m.ProgressNotify + } + return false +} + +func (m *WatchCreateRequest) GetFilters() []WatchCreateRequest_FilterType { + if m != nil { + return m.Filters + } + return nil +} + +func (m *WatchCreateRequest) GetPrevKv() bool { + if m != nil { + return m.PrevKv + } + return false +} + type WatchCancelRequest struct { // watch_id is the watcher id to cancel so that no more events are transmitted. 
WatchId int64 `protobuf:"varint,1,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"` @@ -1208,6 +1525,13 @@ func (m *WatchCancelRequest) String() string { return proto.CompactTe func (*WatchCancelRequest) ProtoMessage() {} func (*WatchCancelRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{20} } +func (m *WatchCancelRequest) GetWatchId() int64 { + if m != nil { + return m.WatchId + } + return 0 +} + type WatchResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // watch_id is the ID of the watcher that corresponds to the response. @@ -1228,8 +1552,10 @@ type WatchResponse struct { // // The client should treat the watcher as canceled and should not try to create any // watcher with the same start_revision again. - CompactRevision int64 `protobuf:"varint,5,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"` - Events []*mvccpb.Event `protobuf:"bytes,11,rep,name=events" json:"events,omitempty"` + CompactRevision int64 `protobuf:"varint,5,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"` + // cancel_reason indicates the reason for canceling the watcher. + CancelReason string `protobuf:"bytes,6,opt,name=cancel_reason,json=cancelReason,proto3" json:"cancel_reason,omitempty"` + Events []*mvccpb.Event `protobuf:"bytes,11,rep,name=events" json:"events,omitempty"` } func (m *WatchResponse) Reset() { *m = WatchResponse{} } @@ -1244,6 +1570,41 @@ func (m *WatchResponse) GetHeader() *ResponseHeader { return nil } +func (m *WatchResponse) GetWatchId() int64 { + if m != nil { + return m.WatchId + } + return 0 +} + +func (m *WatchResponse) GetCreated() bool { + if m != nil { + return m.Created + } + return false +} + +func (m *WatchResponse) GetCanceled() bool { + if m != nil { + return m.Canceled + } + return false +} + +func (m *WatchResponse) GetCompactRevision() int64 { + if m != nil { + return m.CompactRevision + } + return 0 +} + +func (m *WatchResponse) GetCancelReason() string { + if m != nil { + return m.CancelReason + } + return "" +} + func (m *WatchResponse) GetEvents() []*mvccpb.Event { if m != nil { return m.Events @@ -1263,6 +1624,20 @@ func (m *LeaseGrantRequest) String() string { return proto.CompactTex func (*LeaseGrantRequest) ProtoMessage() {} func (*LeaseGrantRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{22} } +func (m *LeaseGrantRequest) GetTTL() int64 { + if m != nil { + return m.TTL + } + return 0 +} + +func (m *LeaseGrantRequest) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + type LeaseGrantResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // ID is the lease ID for the granted lease. @@ -1284,6 +1659,27 @@ func (m *LeaseGrantResponse) GetHeader() *ResponseHeader { return nil } +func (m *LeaseGrantResponse) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *LeaseGrantResponse) GetTTL() int64 { + if m != nil { + return m.TTL + } + return 0 +} + +func (m *LeaseGrantResponse) GetError() string { + if m != nil { + return m.Error + } + return "" +} + type LeaseRevokeRequest struct { // ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted. 
ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` @@ -1294,6 +1690,13 @@ func (m *LeaseRevokeRequest) String() string { return proto.CompactTe func (*LeaseRevokeRequest) ProtoMessage() {} func (*LeaseRevokeRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{24} } +func (m *LeaseRevokeRequest) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + type LeaseRevokeResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } @@ -1320,6 +1723,13 @@ func (m *LeaseKeepAliveRequest) String() string { return proto.Compac func (*LeaseKeepAliveRequest) ProtoMessage() {} func (*LeaseKeepAliveRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{26} } +func (m *LeaseKeepAliveRequest) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + type LeaseKeepAliveResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // ID is the lease ID from the keep alive request. @@ -1333,11 +1743,25 @@ func (m *LeaseKeepAliveResponse) String() string { return proto.Compa func (*LeaseKeepAliveResponse) ProtoMessage() {} func (*LeaseKeepAliveResponse) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{27} } -func (m *LeaseKeepAliveResponse) GetHeader() *ResponseHeader { +func (m *LeaseKeepAliveResponse) GetHeader() *ResponseHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *LeaseKeepAliveResponse) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *LeaseKeepAliveResponse) GetTTL() int64 { if m != nil { - return m.Header + return m.TTL } - return nil + return 0 } type LeaseTimeToLiveRequest struct { @@ -1352,6 +1776,20 @@ func (m *LeaseTimeToLiveRequest) String() string { return proto.Compa func (*LeaseTimeToLiveRequest) ProtoMessage() {} func (*LeaseTimeToLiveRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{28} } +func (m *LeaseTimeToLiveRequest) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *LeaseTimeToLiveRequest) GetKeys() bool { + if m != nil { + return m.Keys + } + return false +} + type LeaseTimeToLiveResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // ID is the lease ID from the keep alive request. @@ -1376,6 +1814,34 @@ func (m *LeaseTimeToLiveResponse) GetHeader() *ResponseHeader { return nil } +func (m *LeaseTimeToLiveResponse) GetID() int64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *LeaseTimeToLiveResponse) GetTTL() int64 { + if m != nil { + return m.TTL + } + return 0 +} + +func (m *LeaseTimeToLiveResponse) GetGrantedTTL() int64 { + if m != nil { + return m.GrantedTTL + } + return 0 +} + +func (m *LeaseTimeToLiveResponse) GetKeys() [][]byte { + if m != nil { + return m.Keys + } + return nil +} + type Member struct { // ID is the member ID for this member. 
ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` @@ -1392,6 +1858,34 @@ func (m *Member) String() string { return proto.CompactTextString(m) func (*Member) ProtoMessage() {} func (*Member) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{30} } +func (m *Member) GetID() uint64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *Member) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Member) GetPeerURLs() []string { + if m != nil { + return m.PeerURLs + } + return nil +} + +func (m *Member) GetClientURLs() []string { + if m != nil { + return m.ClientURLs + } + return nil +} + type MemberAddRequest struct { // peerURLs is the list of URLs the added member will use to communicate with the cluster. PeerURLs []string `protobuf:"bytes,1,rep,name=peerURLs" json:"peerURLs,omitempty"` @@ -1402,10 +1896,19 @@ func (m *MemberAddRequest) String() string { return proto.CompactText func (*MemberAddRequest) ProtoMessage() {} func (*MemberAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{31} } +func (m *MemberAddRequest) GetPeerURLs() []string { + if m != nil { + return m.PeerURLs + } + return nil +} + type MemberAddResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // member is the member information for the added member. Member *Member `protobuf:"bytes,2,opt,name=member" json:"member,omitempty"` + // members is a list of all members after adding the new member. + Members []*Member `protobuf:"bytes,3,rep,name=members" json:"members,omitempty"` } func (m *MemberAddResponse) Reset() { *m = MemberAddResponse{} } @@ -1427,6 +1930,13 @@ func (m *MemberAddResponse) GetMember() *Member { return nil } +func (m *MemberAddResponse) GetMembers() []*Member { + if m != nil { + return m.Members + } + return nil +} + type MemberRemoveRequest struct { // ID is the member ID of the member to remove. ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` @@ -1437,8 +1947,17 @@ func (m *MemberRemoveRequest) String() string { return proto.CompactT func (*MemberRemoveRequest) ProtoMessage() {} func (*MemberRemoveRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{33} } +func (m *MemberRemoveRequest) GetID() uint64 { + if m != nil { + return m.ID + } + return 0 +} + type MemberRemoveResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // members is a list of all members after removing the member. + Members []*Member `protobuf:"bytes,2,rep,name=members" json:"members,omitempty"` } func (m *MemberRemoveResponse) Reset() { *m = MemberRemoveResponse{} } @@ -1453,6 +1972,13 @@ func (m *MemberRemoveResponse) GetHeader() *ResponseHeader { return nil } +func (m *MemberRemoveResponse) GetMembers() []*Member { + if m != nil { + return m.Members + } + return nil +} + type MemberUpdateRequest struct { // ID is the member ID of the member to update. 
ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"` @@ -1465,8 +1991,24 @@ func (m *MemberUpdateRequest) String() string { return proto.CompactT func (*MemberUpdateRequest) ProtoMessage() {} func (*MemberUpdateRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{35} } +func (m *MemberUpdateRequest) GetID() uint64 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *MemberUpdateRequest) GetPeerURLs() []string { + if m != nil { + return m.PeerURLs + } + return nil +} + type MemberUpdateResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` + // members is a list of all members after updating the member. + Members []*Member `protobuf:"bytes,2,rep,name=members" json:"members,omitempty"` } func (m *MemberUpdateResponse) Reset() { *m = MemberUpdateResponse{} } @@ -1481,6 +2023,13 @@ func (m *MemberUpdateResponse) GetHeader() *ResponseHeader { return nil } +func (m *MemberUpdateResponse) GetMembers() []*Member { + if m != nil { + return m.Members + } + return nil +} + type MemberListRequest struct { } @@ -1555,6 +2104,27 @@ func (m *AlarmRequest) String() string { return proto.CompactTextStri func (*AlarmRequest) ProtoMessage() {} func (*AlarmRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{41} } +func (m *AlarmRequest) GetAction() AlarmRequest_AlarmAction { + if m != nil { + return m.Action + } + return AlarmRequest_GET +} + +func (m *AlarmRequest) GetMemberID() uint64 { + if m != nil { + return m.MemberID + } + return 0 +} + +func (m *AlarmRequest) GetAlarm() AlarmType { + if m != nil { + return m.Alarm + } + return AlarmType_NONE +} + type AlarmMember struct { // memberID is the ID of the member associated with the raised alarm. MemberID uint64 `protobuf:"varint,1,opt,name=memberID,proto3" json:"memberID,omitempty"` @@ -1567,6 +2137,20 @@ func (m *AlarmMember) String() string { return proto.CompactTextStrin func (*AlarmMember) ProtoMessage() {} func (*AlarmMember) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{42} } +func (m *AlarmMember) GetMemberID() uint64 { + if m != nil { + return m.MemberID + } + return 0 +} + +func (m *AlarmMember) GetAlarm() AlarmType { + if m != nil { + return m.Alarm + } + return AlarmType_NONE +} + type AlarmResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` // alarms is a list of alarms associated with the alarm request. 
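Most of the bulk in this regenerated file is the new nil-safe accessor pattern visible throughout the hunks above: every message field now gets a Get<Field>() method that returns the zero value on a nil receiver, so callers can read through a possibly-nil response without panicking. A minimal sketch of the pattern, using stand-in types rather than the real etcdserverpb messages:

package main

import "fmt"

type member struct{ Name string }

type memberListResponse struct {
	Members []*member
}

// GetMembers follows the generated accessors above: in Go a method with a
// pointer receiver may be called on a nil pointer, and the explicit nil
// check turns that call into a harmless zero value instead of a panic.
func (m *memberListResponse) GetMembers() []*member {
	if m != nil {
		return m.Members
	}
	return nil
}

func main() {
	var resp *memberListResponse // e.g., an RPC that yielded no message
	fmt.Println(len(resp.GetMembers())) // prints 0; no nil-pointer panic
}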
@@ -1626,6 +2210,41 @@ func (m *StatusResponse) GetHeader() *ResponseHeader { return nil } +func (m *StatusResponse) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *StatusResponse) GetDbSize() int64 { + if m != nil { + return m.DbSize + } + return 0 +} + +func (m *StatusResponse) GetLeader() uint64 { + if m != nil { + return m.Leader + } + return 0 +} + +func (m *StatusResponse) GetRaftIndex() uint64 { + if m != nil { + return m.RaftIndex + } + return 0 +} + +func (m *StatusResponse) GetRaftTerm() uint64 { + if m != nil { + return m.RaftTerm + } + return 0 +} + type AuthEnableRequest struct { } @@ -1652,6 +2271,20 @@ func (m *AuthenticateRequest) String() string { return proto.CompactT func (*AuthenticateRequest) ProtoMessage() {} func (*AuthenticateRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{48} } +func (m *AuthenticateRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *AuthenticateRequest) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} + type AuthUserAddRequest struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` @@ -1662,6 +2295,20 @@ func (m *AuthUserAddRequest) String() string { return proto.CompactTe func (*AuthUserAddRequest) ProtoMessage() {} func (*AuthUserAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{49} } +func (m *AuthUserAddRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *AuthUserAddRequest) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} + type AuthUserGetRequest struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` } @@ -1671,6 +2318,13 @@ func (m *AuthUserGetRequest) String() string { return proto.CompactTe func (*AuthUserGetRequest) ProtoMessage() {} func (*AuthUserGetRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{50} } +func (m *AuthUserGetRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + type AuthUserDeleteRequest struct { // name is the name of the user to delete. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` @@ -1681,6 +2335,13 @@ func (m *AuthUserDeleteRequest) String() string { return proto.Compac func (*AuthUserDeleteRequest) ProtoMessage() {} func (*AuthUserDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{51} } +func (m *AuthUserDeleteRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + type AuthUserChangePasswordRequest struct { // name is the name of the user whose password is being changed. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` @@ -1695,6 +2356,20 @@ func (*AuthUserChangePasswordRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{52} } +func (m *AuthUserChangePasswordRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *AuthUserChangePasswordRequest) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} + type AuthUserGrantRoleRequest struct { // user is the name of the user which should be granted a given role. 
User string `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"` @@ -1707,6 +2382,20 @@ func (m *AuthUserGrantRoleRequest) String() string { return proto.Com func (*AuthUserGrantRoleRequest) ProtoMessage() {} func (*AuthUserGrantRoleRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{53} } +func (m *AuthUserGrantRoleRequest) GetUser() string { + if m != nil { + return m.User + } + return "" +} + +func (m *AuthUserGrantRoleRequest) GetRole() string { + if m != nil { + return m.Role + } + return "" +} + type AuthUserRevokeRoleRequest struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"` @@ -1717,6 +2406,20 @@ func (m *AuthUserRevokeRoleRequest) String() string { return proto.Co func (*AuthUserRevokeRoleRequest) ProtoMessage() {} func (*AuthUserRevokeRoleRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{54} } +func (m *AuthUserRevokeRoleRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *AuthUserRevokeRoleRequest) GetRole() string { + if m != nil { + return m.Role + } + return "" +} + type AuthRoleAddRequest struct { // name is the name of the role to add to the authentication system. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` @@ -1727,6 +2430,13 @@ func (m *AuthRoleAddRequest) String() string { return proto.CompactTe func (*AuthRoleAddRequest) ProtoMessage() {} func (*AuthRoleAddRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{55} } +func (m *AuthRoleAddRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + type AuthRoleGetRequest struct { Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"` } @@ -1736,6 +2446,13 @@ func (m *AuthRoleGetRequest) String() string { return proto.CompactTe func (*AuthRoleGetRequest) ProtoMessage() {} func (*AuthRoleGetRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{56} } +func (m *AuthRoleGetRequest) GetRole() string { + if m != nil { + return m.Role + } + return "" +} + type AuthUserListRequest struct { } @@ -1761,6 +2478,13 @@ func (m *AuthRoleDeleteRequest) String() string { return proto.Compac func (*AuthRoleDeleteRequest) ProtoMessage() {} func (*AuthRoleDeleteRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{59} } +func (m *AuthRoleDeleteRequest) GetRole() string { + if m != nil { + return m.Role + } + return "" +} + type AuthRoleGrantPermissionRequest struct { // name is the name of the role which will be granted the permission. 
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` @@ -1775,6 +2499,13 @@ func (*AuthRoleGrantPermissionRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{60} } +func (m *AuthRoleGrantPermissionRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + func (m *AuthRoleGrantPermissionRequest) GetPerm() *authpb.Permission { if m != nil { return m.Perm @@ -1795,6 +2526,27 @@ func (*AuthRoleRevokePermissionRequest) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{61} } +func (m *AuthRoleRevokePermissionRequest) GetRole() string { + if m != nil { + return m.Role + } + return "" +} + +func (m *AuthRoleRevokePermissionRequest) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *AuthRoleRevokePermissionRequest) GetRangeEnd() string { + if m != nil { + return m.RangeEnd + } + return "" +} + type AuthEnableResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } @@ -1845,6 +2597,13 @@ func (m *AuthenticateResponse) GetHeader() *ResponseHeader { return nil } +func (m *AuthenticateResponse) GetToken() string { + if m != nil { + return m.Token + } + return "" +} + type AuthUserAddResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } @@ -1878,6 +2637,13 @@ func (m *AuthUserGetResponse) GetHeader() *ResponseHeader { return nil } +func (m *AuthUserGetResponse) GetRoles() []string { + if m != nil { + return m.Roles + } + return nil +} + type AuthUserDeleteResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } @@ -2001,6 +2767,13 @@ func (m *AuthRoleListResponse) GetHeader() *ResponseHeader { return nil } +func (m *AuthRoleListResponse) GetRoles() []string { + if m != nil { + return m.Roles + } + return nil +} + type AuthUserListResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` Users []string `protobuf:"bytes,2,rep,name=users" json:"users,omitempty"` @@ -2018,6 +2791,13 @@ func (m *AuthUserListResponse) GetHeader() *ResponseHeader { return nil } +func (m *AuthUserListResponse) GetUsers() []string { + if m != nil { + return m.Users + } + return nil +} + type AuthRoleDeleteResponse struct { Header *ResponseHeader `protobuf:"bytes,1,opt,name=header" json:"header,omitempty"` } @@ -3929,12 +4709,32 @@ func (m *PutRequest) MarshalTo(dAtA []byte) (int, error) { if m.Lease != 0 { dAtA[i] = 0x18 i++ - i = encodeVarintRpc(dAtA, i, uint64(m.Lease)) + i = encodeVarintRpc(dAtA, i, uint64(m.Lease)) + } + if m.PrevKv { + dAtA[i] = 0x20 + i++ + if m.PrevKv { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.IgnoreValue { + dAtA[i] = 0x28 + i++ + if m.IgnoreValue { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ } - if m.PrevKv { - dAtA[i] = 0x20 + if m.IgnoreLease { + dAtA[i] = 0x30 i++ - if m.PrevKv { + if m.IgnoreLease { dAtA[i] = 1 } else { dAtA[i] = 0 @@ -4749,6 +5549,12 @@ func (m *WatchResponse) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintRpc(dAtA, i, uint64(m.CompactRevision)) } + if len(m.CancelReason) > 0 { + dAtA[i] = 0x32 + i++ + i = encodeVarintRpc(dAtA, i, uint64(len(m.CancelReason))) + i += copy(dAtA[i:], m.CancelReason) + } if len(m.Events) > 0 { for _, msg := range m.Events { dAtA[i] = 0x5a @@ -5159,6 +5965,18 @@ func (m *MemberAddResponse) MarshalTo(dAtA []byte) (int, error) { } i += n29 } + if len(m.Members) > 0 { + for _, msg := range m.Members { + dAtA[i] = 0x1a + i++ + i = 
encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } return i, nil } @@ -5210,6 +6028,18 @@ func (m *MemberRemoveResponse) MarshalTo(dAtA []byte) (int, error) { } i += n30 } + if len(m.Members) > 0 { + for _, msg := range m.Members { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } return i, nil } @@ -5276,6 +6106,18 @@ func (m *MemberUpdateResponse) MarshalTo(dAtA []byte) (int, error) { } i += n31 } + if len(m.Members) > 0 { + for _, msg := range m.Members { + dAtA[i] = 0x12 + i++ + i = encodeVarintRpc(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } return i, nil } @@ -6610,6 +7452,12 @@ func (m *PutRequest) Size() (n int) { if m.PrevKv { n += 2 } + if m.IgnoreValue { + n += 2 + } + if m.IgnoreLease { + n += 2 + } return n } @@ -6973,6 +7821,10 @@ func (m *WatchResponse) Size() (n int) { if m.CompactRevision != 0 { n += 1 + sovRpc(uint64(m.CompactRevision)) } + l = len(m.CancelReason) + if l > 0 { + n += 1 + l + sovRpc(uint64(l)) + } if len(m.Events) > 0 { for _, e := range m.Events { l = e.Size() @@ -7143,6 +7995,12 @@ func (m *MemberAddResponse) Size() (n int) { l = m.Member.Size() n += 1 + l + sovRpc(uint64(l)) } + if len(m.Members) > 0 { + for _, e := range m.Members { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } return n } @@ -7162,6 +8020,12 @@ func (m *MemberRemoveResponse) Size() (n int) { l = m.Header.Size() n += 1 + l + sovRpc(uint64(l)) } + if len(m.Members) > 0 { + for _, e := range m.Members { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } return n } @@ -7187,6 +8051,12 @@ func (m *MemberUpdateResponse) Size() (n int) { l = m.Header.Size() n += 1 + l + sovRpc(uint64(l)) } + if len(m.Members) > 0 { + for _, e := range m.Members { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } return n } @@ -8413,6 +9283,46 @@ func (m *PutRequest) Unmarshal(dAtA []byte) error { } } m.PrevKv = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IgnoreValue", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.IgnoreValue = bool(v != 0) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IgnoreLease", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.IgnoreLease = bool(v != 0) default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ -10345,7 +11255,24 @@ func (m *WatchCreateRequest) Unmarshal(dAtA []byte) error { } m.ProgressNotify = bool(v != 0) case 5: - if wireType == 2 { + if wireType == 0 { + var v WatchCreateRequest_FilterType + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (WatchCreateRequest_FilterType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Filters = append(m.Filters, v) + } else if wireType == 2 { var packedLen int for shift := uint(0); ; 
shift += 7 { if shift >= 64 { @@ -10386,23 +11313,6 @@ func (m *WatchCreateRequest) Unmarshal(dAtA []byte) error { } m.Filters = append(m.Filters, v) } - } else if wireType == 0 { - var v WatchCreateRequest_FilterType - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (WatchCreateRequest_FilterType(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Filters = append(m.Filters, v) } else { return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) } @@ -10656,6 +11566,35 @@ func (m *WatchResponse) Unmarshal(dAtA []byte) error { break } } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CancelReason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CancelReason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex case 11: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) @@ -11876,6 +12815,37 @@ func (m *MemberAddResponse) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Members = append(m.Members, &Member{}) + if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ -12028,6 +12998,37 @@ func (m *MemberRemoveResponse) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Members = append(m.Members, &Member{}) + if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ -12209,6 +13210,37 @@ func (m *MemberUpdateResponse) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
(int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Members = append(m.Members, &Member{}) + if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ -16041,218 +17073,221 @@ var ( func init() { proto.RegisterFile("rpc.proto", fileDescriptorRpc) } var fileDescriptorRpc = []byte{ - // 3401 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x5b, 0xcb, 0x73, 0x1b, 0xc7, - 0xd1, 0xe7, 0x02, 0x24, 0x40, 0x34, 0x1e, 0x84, 0x86, 0x94, 0x04, 0xae, 0x24, 0x8a, 0x1a, 0xbd, - 0x28, 0xc9, 0x26, 0x6d, 0xda, 0xdf, 0x77, 0xd0, 0xe7, 0x72, 0x7d, 0x14, 0x09, 0x8b, 0x0c, 0x29, - 0x52, 0x5e, 0x52, 0xb2, 0x53, 0xe5, 0x0a, 0x6a, 0x09, 0x8c, 0xc8, 0x2d, 0x02, 0xbb, 0xf0, 0xee, - 0x02, 0x22, 0x9d, 0xa4, 0x2a, 0xe5, 0xd8, 0x95, 0x4a, 0x8e, 0xf1, 0x21, 0xaf, 0x63, 0x2a, 0x87, - 0xfc, 0x01, 0xb9, 0xe5, 0x0f, 0x48, 0xe5, 0x92, 0x54, 0xe5, 0x1f, 0x48, 0x39, 0x39, 0xe4, 0x90, - 0x7b, 0x4e, 0xa9, 0xa4, 0xe6, 0xb5, 0x3b, 0xbb, 0xd8, 0x05, 0xe5, 0x6c, 0x7c, 0x11, 0x77, 0x66, - 0x7a, 0xfa, 0xd7, 0xdd, 0x33, 0xdd, 0xd3, 0xd3, 0x03, 0x41, 0xc9, 0xed, 0xb7, 0x97, 0xfb, 0xae, - 0xe3, 0x3b, 0xa8, 0x42, 0xfc, 0x76, 0xc7, 0x23, 0xee, 0x90, 0xb8, 0xfd, 0x43, 0x7d, 0xee, 0xc8, - 0x39, 0x72, 0xd8, 0xc0, 0x0a, 0xfd, 0xe2, 0x34, 0xfa, 0x3c, 0xa5, 0x59, 0xe9, 0x0d, 0xdb, 0x6d, - 0xf6, 0x4f, 0xff, 0x70, 0xe5, 0x64, 0x28, 0x86, 0xae, 0xb0, 0x21, 0x73, 0xe0, 0x1f, 0xb3, 0x7f, - 0xfa, 0x87, 0xec, 0x8f, 0x18, 0xbc, 0x7a, 0xe4, 0x38, 0x47, 0x5d, 0xb2, 0x62, 0xf6, 0xad, 0x15, - 0xd3, 0xb6, 0x1d, 0xdf, 0xf4, 0x2d, 0xc7, 0xf6, 0xf8, 0x28, 0xfe, 0x5c, 0x83, 0x9a, 0x41, 0xbc, - 0xbe, 0x63, 0x7b, 0x64, 0x93, 0x98, 0x1d, 0xe2, 0xa2, 0x6b, 0x00, 0xed, 0xee, 0xc0, 0xf3, 0x89, - 0xdb, 0xb2, 0x3a, 0x0d, 0x6d, 0x51, 0x5b, 0x9a, 0x34, 0x4a, 0xa2, 0x67, 0xab, 0x83, 0xae, 0x40, - 0xa9, 0x47, 0x7a, 0x87, 0x7c, 0x34, 0xc7, 0x46, 0xa7, 0x79, 0xc7, 0x56, 0x07, 0xe9, 0x30, 0xed, - 0x92, 0xa1, 0xe5, 0x59, 0x8e, 0xdd, 0xc8, 0x2f, 0x6a, 0x4b, 0x79, 0x23, 0x68, 0xd3, 0x89, 0xae, - 0xf9, 0xc2, 0x6f, 0xf9, 0xc4, 0xed, 0x35, 0x26, 0xf9, 0x44, 0xda, 0x71, 0x40, 0xdc, 0x1e, 0xfe, - 0x6c, 0x0a, 0x2a, 0x86, 0x69, 0x1f, 0x11, 0x83, 0x7c, 0x3c, 0x20, 0x9e, 0x8f, 0xea, 0x90, 0x3f, - 0x21, 0x67, 0x0c, 0xbe, 0x62, 0xd0, 0x4f, 0x3e, 0xdf, 0x3e, 0x22, 0x2d, 0x62, 0x73, 0xe0, 0x0a, - 0x9d, 0x6f, 0x1f, 0x91, 0xa6, 0xdd, 0x41, 0x73, 0x30, 0xd5, 0xb5, 0x7a, 0x96, 0x2f, 0x50, 0x79, - 0x23, 0x22, 0xce, 0x64, 0x4c, 0x9c, 0x75, 0x00, 0xcf, 0x71, 0xfd, 0x96, 0xe3, 0x76, 0x88, 0xdb, - 0x98, 0x5a, 0xd4, 0x96, 0x6a, 0xab, 0xb7, 0x96, 0xd5, 0x85, 0x58, 0x56, 0x05, 0x5a, 0xde, 0x77, - 0x5c, 0x7f, 0x8f, 0xd2, 0x1a, 0x25, 0x4f, 0x7e, 0xa2, 0xf7, 0xa0, 0xcc, 0x98, 0xf8, 0xa6, 0x7b, - 0x44, 0xfc, 0x46, 0x81, 0x71, 0xb9, 0x7d, 0x0e, 0x97, 0x03, 0x46, 0x6c, 0x30, 0x78, 0xfe, 0x8d, - 0x30, 0x54, 0x3c, 0xe2, 0x5a, 0x66, 0xd7, 0xfa, 0xc4, 0x3c, 0xec, 0x92, 0x46, 0x71, 0x51, 0x5b, - 0x9a, 0x36, 0x22, 0x7d, 0x54, 0xff, 0x13, 0x72, 0xe6, 0xb5, 0x1c, 0xbb, 0x7b, 0xd6, 0x98, 0x66, - 0x04, 0xd3, 0xb4, 0x63, 0xcf, 0xee, 0x9e, 0xb1, 0x45, 0x73, 0x06, 0xb6, 0xcf, 0x47, 0x4b, 0x6c, - 0xb4, 0xc4, 0x7a, 0xd8, 0xf0, 0x12, 0xd4, 0x7b, 0x96, 0xdd, 0xea, 0x39, 0x9d, 0x56, 0x60, 0x10, - 0x60, 0x06, 0xa9, 0xf5, 0x2c, 0xfb, 0x89, 0xd3, 0x31, 0xa4, 0x59, 0x28, 0xa5, 0x79, 0x1a, 0xa5, - 0x2c, 
0x0b, 0x4a, 0xf3, 0x54, 0xa5, 0x5c, 0x86, 0x59, 0xca, 0xb3, 0xed, 0x12, 0xd3, 0x27, 0x21, - 0x71, 0x85, 0x11, 0x5f, 0xe8, 0x59, 0xf6, 0x3a, 0x1b, 0x89, 0xd0, 0x9b, 0xa7, 0x23, 0xf4, 0x55, - 0x41, 0x6f, 0x9e, 0x46, 0xe9, 0xf1, 0x32, 0x94, 0x02, 0x9b, 0xa3, 0x69, 0x98, 0xdc, 0xdd, 0xdb, - 0x6d, 0xd6, 0x27, 0x10, 0x40, 0x61, 0x6d, 0x7f, 0xbd, 0xb9, 0xbb, 0x51, 0xd7, 0x50, 0x19, 0x8a, - 0x1b, 0x4d, 0xde, 0xc8, 0xe1, 0x47, 0x00, 0xa1, 0x75, 0x51, 0x11, 0xf2, 0xdb, 0xcd, 0x6f, 0xd6, - 0x27, 0x28, 0xcd, 0xf3, 0xa6, 0xb1, 0xbf, 0xb5, 0xb7, 0x5b, 0xd7, 0xe8, 0xe4, 0x75, 0xa3, 0xb9, - 0x76, 0xd0, 0xac, 0xe7, 0x28, 0xc5, 0x93, 0xbd, 0x8d, 0x7a, 0x1e, 0x95, 0x60, 0xea, 0xf9, 0xda, - 0xce, 0xb3, 0x66, 0x7d, 0x12, 0x7f, 0xa1, 0x41, 0x55, 0xac, 0x17, 0xf7, 0x09, 0xf4, 0x36, 0x14, - 0x8e, 0x99, 0x5f, 0xb0, 0xad, 0x58, 0x5e, 0xbd, 0x1a, 0x5b, 0xdc, 0x88, 0xef, 0x18, 0x82, 0x16, - 0x61, 0xc8, 0x9f, 0x0c, 0xbd, 0x46, 0x6e, 0x31, 0xbf, 0x54, 0x5e, 0xad, 0x2f, 0x73, 0x87, 0x5d, - 0xde, 0x26, 0x67, 0xcf, 0xcd, 0xee, 0x80, 0x18, 0x74, 0x10, 0x21, 0x98, 0xec, 0x39, 0x2e, 0x61, - 0x3b, 0x76, 0xda, 0x60, 0xdf, 0x74, 0x1b, 0xb3, 0x45, 0x13, 0xbb, 0x95, 0x37, 0x70, 0x1b, 0xe0, - 0xe9, 0xc0, 0x4f, 0xf7, 0x8c, 0x39, 0x98, 0x1a, 0x52, 0xbe, 0xc2, 0x2b, 0x78, 0x83, 0xb9, 0x04, - 0x31, 0x3d, 0x12, 0xb8, 0x04, 0x6d, 0xa0, 0xcb, 0x50, 0xec, 0xbb, 0x64, 0xd8, 0x3a, 0x19, 0x32, - 0x8c, 0x69, 0xa3, 0x40, 0x9b, 0xdb, 0x43, 0x6c, 0x43, 0x99, 0x81, 0x64, 0xd2, 0xfb, 0x5e, 0xc8, - 0x3d, 0xc7, 0xa6, 0x8d, 0xea, 0x2e, 0xf1, 0x3e, 0x02, 0xb4, 0x41, 0xba, 0xc4, 0x27, 0x59, 0xdc, - 0x5e, 0xd1, 0x26, 0x1f, 0xd1, 0xe6, 0xc7, 0x1a, 0xcc, 0x46, 0xd8, 0x67, 0x52, 0xab, 0x01, 0xc5, - 0x0e, 0x63, 0xc6, 0x25, 0xc8, 0x1b, 0xb2, 0x89, 0x1e, 0xc0, 0xb4, 0x10, 0xc0, 0x6b, 0xe4, 0x53, - 0x56, 0xbb, 0xc8, 0x65, 0xf2, 0xf0, 0xdf, 0x35, 0x28, 0x09, 0x45, 0xf7, 0xfa, 0x68, 0x0d, 0xaa, - 0x2e, 0x6f, 0xb4, 0x98, 0x3e, 0x42, 0x22, 0x3d, 0x3d, 0x7a, 0x6c, 0x4e, 0x18, 0x15, 0x31, 0x85, - 0x75, 0xa3, 0xff, 0x83, 0xb2, 0x64, 0xd1, 0x1f, 0xf8, 0xc2, 0xe4, 0x8d, 0x28, 0x83, 0x70, 0xe7, - 0x6c, 0x4e, 0x18, 0x20, 0xc8, 0x9f, 0x0e, 0x7c, 0x74, 0x00, 0x73, 0x72, 0x32, 0xd7, 0x46, 0x88, - 0x91, 0x67, 0x5c, 0x16, 0xa3, 0x5c, 0x46, 0x97, 0x6a, 0x73, 0xc2, 0x40, 0x62, 0xbe, 0x32, 0xf8, - 0xa8, 0x04, 0x45, 0xd1, 0x8b, 0xff, 0xa1, 0x01, 0x48, 0x83, 0xee, 0xf5, 0xd1, 0x06, 0xd4, 0x5c, - 0xd1, 0x8a, 0x28, 0x7c, 0x25, 0x51, 0x61, 0xb1, 0x0e, 0x13, 0x46, 0x55, 0x4e, 0xe2, 0x2a, 0xbf, - 0x0b, 0x95, 0x80, 0x4b, 0xa8, 0xf3, 0x7c, 0x82, 0xce, 0x01, 0x87, 0xb2, 0x9c, 0x40, 0xb5, 0xfe, - 0x00, 0x2e, 0x06, 0xf3, 0x13, 0xd4, 0xbe, 0x31, 0x46, 0xed, 0x80, 0xe1, 0xac, 0xe4, 0xa0, 0x2a, - 0x0e, 0xf4, 0xac, 0xe1, 0xdd, 0xf8, 0xd7, 0x79, 0x28, 0xae, 0x3b, 0xbd, 0xbe, 0xe9, 0xd2, 0x35, - 0x2a, 0xb8, 0xc4, 0x1b, 0x74, 0x7d, 0xa6, 0x6e, 0x6d, 0xf5, 0x66, 0x14, 0x41, 0x90, 0xc9, 0xbf, - 0x06, 0x23, 0x35, 0xc4, 0x14, 0x3a, 0x59, 0x1c, 0x2d, 0xb9, 0x57, 0x98, 0x2c, 0x0e, 0x16, 0x31, - 0x45, 0xfa, 0x52, 0x3e, 0xf4, 0x25, 0x1d, 0x8a, 0x43, 0xe2, 0x86, 0xc7, 0xe1, 0xe6, 0x84, 0x21, - 0x3b, 0xd0, 0x3d, 0x98, 0x89, 0x87, 0xe6, 0x29, 0x41, 0x53, 0x6b, 0x47, 0x23, 0xf9, 0x4d, 0xa8, - 0x44, 0xce, 0x87, 0x82, 0xa0, 0x2b, 0xf7, 0x94, 0xe3, 0xe1, 0x92, 0x0c, 0x4a, 0xf4, 0x2c, 0xab, - 0x6c, 0x4e, 0x88, 0xb0, 0x84, 0xff, 0x1f, 0xaa, 0x11, 0x5d, 0x69, 0xf8, 0x6d, 0xbe, 0xff, 0x6c, - 0x6d, 0x87, 0xc7, 0xea, 0xc7, 0x2c, 0x3c, 0x1b, 0x75, 0x8d, 0x86, 0xfc, 0x9d, 0xe6, 0xfe, 0x7e, - 0x3d, 0x87, 0xaa, 0x50, 0xda, 0xdd, 0x3b, 0x68, 0x71, 0xaa, 0x3c, 0x7e, 0x27, 0xe0, 0x20, 0x62, - 0xbd, 0x12, 0xe2, 0x27, 0x94, 
0x10, 0xaf, 0xc9, 0x10, 0x9f, 0x0b, 0x43, 0x7c, 0xfe, 0x51, 0x0d, - 0x2a, 0xdc, 0x3e, 0xad, 0x81, 0x4d, 0x8f, 0x99, 0x5f, 0x6a, 0x00, 0x07, 0xa7, 0xb6, 0x0c, 0x40, - 0x2b, 0x50, 0x6c, 0x73, 0xe6, 0x0d, 0x8d, 0xf9, 0xf3, 0xc5, 0x44, 0x93, 0x1b, 0x92, 0x0a, 0xbd, - 0x09, 0x45, 0x6f, 0xd0, 0x6e, 0x13, 0x4f, 0x86, 0xfb, 0xcb, 0xf1, 0x90, 0x22, 0x1c, 0xde, 0x90, - 0x74, 0x74, 0xca, 0x0b, 0xd3, 0xea, 0x0e, 0x58, 0xf0, 0x1f, 0x3f, 0x45, 0xd0, 0xe1, 0x9f, 0x69, - 0x50, 0x66, 0x52, 0x66, 0x8a, 0x63, 0x57, 0xa1, 0xc4, 0x64, 0x20, 0x1d, 0x11, 0xc9, 0xa6, 0x8d, - 0xb0, 0x03, 0xfd, 0x2f, 0x94, 0xe4, 0x0e, 0x96, 0xc1, 0xac, 0x91, 0xcc, 0x76, 0xaf, 0x6f, 0x84, - 0xa4, 0x78, 0x1b, 0x2e, 0x30, 0xab, 0xb4, 0x69, 0x62, 0x29, 0xed, 0xa8, 0xa6, 0x5e, 0x5a, 0x2c, - 0xf5, 0xd2, 0x61, 0xba, 0x7f, 0x7c, 0xe6, 0x59, 0x6d, 0xb3, 0x2b, 0xa4, 0x08, 0xda, 0xf8, 0x1b, - 0x80, 0x54, 0x66, 0x59, 0xd4, 0xc5, 0x55, 0x28, 0x6f, 0x9a, 0xde, 0xb1, 0x10, 0x09, 0x7f, 0x08, - 0x15, 0xde, 0xcc, 0x64, 0x43, 0x04, 0x93, 0xc7, 0xa6, 0x77, 0xcc, 0x04, 0xaf, 0x1a, 0xec, 0x1b, - 0x5f, 0x80, 0x99, 0x7d, 0xdb, 0xec, 0x7b, 0xc7, 0x8e, 0x8c, 0xb5, 0x34, 0xb1, 0xae, 0x87, 0x7d, - 0x99, 0x10, 0xef, 0xc2, 0x8c, 0x4b, 0x7a, 0xa6, 0x65, 0x5b, 0xf6, 0x51, 0xeb, 0xf0, 0xcc, 0x27, - 0x9e, 0xc8, 0xbb, 0x6b, 0x41, 0xf7, 0x23, 0xda, 0x4b, 0x45, 0x3b, 0xec, 0x3a, 0x87, 0xc2, 0xe3, - 0xd9, 0x37, 0xfe, 0x8d, 0x06, 0x95, 0x0f, 0x4c, 0xbf, 0x2d, 0xad, 0x80, 0xb6, 0xa0, 0x16, 0xf8, - 0x39, 0xeb, 0x11, 0xb2, 0xc4, 0x02, 0x3e, 0x9b, 0x23, 0x33, 0x32, 0x19, 0xf0, 0xab, 0x6d, 0xb5, - 0x83, 0xb1, 0x32, 0xed, 0x36, 0xe9, 0x06, 0xac, 0x72, 0xe9, 0xac, 0x18, 0xa1, 0xca, 0x4a, 0xed, - 0x78, 0x34, 0x13, 0x1e, 0x86, 0xdc, 0x2d, 0x7f, 0x9e, 0x03, 0x34, 0x2a, 0xc3, 0x57, 0xcd, 0x0f, - 0x6e, 0x43, 0xcd, 0xf3, 0x4d, 0xd7, 0x6f, 0xc5, 0x6e, 0x25, 0x55, 0xd6, 0x1b, 0xc4, 0xaa, 0xbb, - 0x30, 0xd3, 0x77, 0x9d, 0x23, 0x97, 0x78, 0x5e, 0xcb, 0x76, 0x7c, 0xeb, 0xc5, 0x99, 0x48, 0x8e, - 0x6a, 0xb2, 0x7b, 0x97, 0xf5, 0xa2, 0x26, 0x14, 0x5f, 0x58, 0x5d, 0x9f, 0xb8, 0x5e, 0x63, 0x6a, - 0x31, 0xbf, 0x54, 0x5b, 0x7d, 0x70, 0x9e, 0xd5, 0x96, 0xdf, 0x63, 0xf4, 0x07, 0x67, 0x7d, 0x62, - 0xc8, 0xb9, 0x6a, 0xda, 0x52, 0x88, 0xa4, 0x2d, 0xb7, 0x01, 0x42, 0x7a, 0x1a, 0xb5, 0x76, 0xf7, - 0x9e, 0x3e, 0x3b, 0xa8, 0x4f, 0xa0, 0x0a, 0x4c, 0xef, 0xee, 0x6d, 0x34, 0x77, 0x9a, 0x34, 0xae, - 0xe1, 0x15, 0x69, 0x1b, 0xd5, 0x86, 0x68, 0x1e, 0xa6, 0x5f, 0xd2, 0x5e, 0x79, 0x6d, 0xcb, 0x1b, - 0x45, 0xd6, 0xde, 0xea, 0xe0, 0xbf, 0x69, 0x50, 0x15, 0xbb, 0x20, 0xd3, 0x56, 0x54, 0x21, 0x72, - 0x11, 0x08, 0x9a, 0x23, 0xf1, 0xdd, 0xd1, 0x11, 0xa9, 0x98, 0x6c, 0x52, 0x77, 0xe7, 0x8b, 0x4d, - 0x3a, 0xc2, 0xac, 0x41, 0x1b, 0xdd, 0x83, 0x7a, 0x9b, 0xbb, 0x7b, 0xec, 0xd8, 0x31, 0x66, 0x44, - 0x7f, 0xb0, 0x48, 0xb7, 0xa1, 0x40, 0x86, 0xc4, 0xf6, 0xbd, 0x46, 0x99, 0xc5, 0xa6, 0xaa, 0x4c, - 0xb4, 0x9a, 0xb4, 0xd7, 0x10, 0x83, 0xf8, 0x7f, 0xe0, 0xc2, 0x0e, 0xcd, 0x74, 0x1f, 0xbb, 0xa6, - 0xad, 0xe6, 0xcc, 0x07, 0x07, 0x3b, 0xc2, 0x2a, 0xf4, 0x13, 0xd5, 0x20, 0xb7, 0xb5, 0x21, 0x74, - 0xc8, 0x6d, 0x6d, 0xe0, 0x4f, 0x35, 0x40, 0xea, 0xbc, 0x4c, 0x66, 0x8a, 0x31, 0x97, 0xf0, 0xf9, - 0x10, 0x7e, 0x0e, 0xa6, 0x88, 0xeb, 0x3a, 0x2e, 0x33, 0x48, 0xc9, 0xe0, 0x0d, 0x7c, 0x4b, 0xc8, - 0x60, 0x90, 0xa1, 0x73, 0x12, 0xec, 0x79, 0xce, 0x4d, 0x0b, 0x44, 0xdd, 0x86, 0xd9, 0x08, 0x55, - 0xa6, 0x18, 0x79, 0x17, 0x2e, 0x32, 0x66, 0xdb, 0x84, 0xf4, 0xd7, 0xba, 0xd6, 0x30, 0x15, 0xb5, - 0x0f, 0x97, 0xe2, 0x84, 0x5f, 0xaf, 0x8d, 0xf0, 0x3b, 0x02, 0xf1, 0xc0, 0xea, 0x91, 0x03, 0x67, - 0x27, 0x5d, 0x36, 0x1a, 0xf8, 0xe8, 0x4d, 0x58, 0x1c, 
0x26, 0xec, 0x1b, 0xff, 0x4a, 0x83, 0xcb, - 0x23, 0xd3, 0xbf, 0xe6, 0x55, 0x5d, 0x00, 0x38, 0xa2, 0xdb, 0x87, 0x74, 0xe8, 0x00, 0xbf, 0xc3, - 0x29, 0x3d, 0x81, 0x9c, 0x34, 0x76, 0x54, 0x84, 0x9c, 0xc7, 0x50, 0x78, 0xc2, 0xca, 0x27, 0x8a, - 0x56, 0x93, 0x52, 0x2b, 0xdb, 0xec, 0xf1, 0x5b, 0x5d, 0xc9, 0x60, 0xdf, 0xec, 0xe8, 0x24, 0xc4, - 0x7d, 0x66, 0xec, 0xf0, 0x23, 0xba, 0x64, 0x04, 0x6d, 0x8a, 0xde, 0xee, 0x5a, 0xc4, 0xf6, 0xd9, - 0xe8, 0x24, 0x1b, 0x55, 0x7a, 0xf0, 0x32, 0xd4, 0x39, 0xd2, 0x5a, 0xa7, 0xa3, 0x1c, 0xd3, 0x01, - 0x3f, 0x2d, 0xca, 0x0f, 0xbf, 0x84, 0x0b, 0x0a, 0x7d, 0x26, 0xd3, 0xbd, 0x06, 0x05, 0x5e, 0x23, - 0x12, 0x27, 0xc4, 0x5c, 0x74, 0x16, 0x87, 0x31, 0x04, 0x0d, 0xbe, 0x0d, 0xb3, 0xa2, 0x87, 0xf4, - 0x9c, 0xa4, 0x55, 0x67, 0xf6, 0xc1, 0x3b, 0x30, 0x17, 0x25, 0xcb, 0xe4, 0x08, 0x6b, 0x12, 0xf4, - 0x59, 0xbf, 0xa3, 0x1c, 0x38, 0xf1, 0x45, 0x51, 0x0d, 0x96, 0x8b, 0x19, 0x2c, 0x10, 0x48, 0xb2, - 0xc8, 0x24, 0xd0, 0xac, 0x34, 0xff, 0x8e, 0xe5, 0x05, 0x69, 0xc5, 0x27, 0x80, 0xd4, 0xce, 0x4c, - 0x8b, 0xb2, 0x0c, 0x45, 0x6e, 0x70, 0x99, 0xb9, 0x26, 0xaf, 0x8a, 0x24, 0xa2, 0x02, 0x6d, 0x90, - 0x17, 0xae, 0x79, 0xd4, 0x23, 0x41, 0x64, 0xa5, 0xf9, 0x9a, 0xda, 0x99, 0x49, 0xe3, 0x3f, 0x68, - 0x50, 0x59, 0xeb, 0x9a, 0x6e, 0x4f, 0x1a, 0xff, 0x5d, 0x28, 0xf0, 0x44, 0x50, 0xdc, 0x9d, 0xee, - 0x44, 0xd9, 0xa8, 0xb4, 0xbc, 0xb1, 0xc6, 0xd3, 0x46, 0x31, 0x8b, 0x2e, 0x96, 0x28, 0x4d, 0x6e, - 0xc4, 0x4a, 0x95, 0x1b, 0xe8, 0x75, 0x98, 0x32, 0xe9, 0x14, 0xe6, 0xbf, 0xb5, 0x78, 0x0a, 0xce, - 0xb8, 0xb1, 0x43, 0x9b, 0x53, 0xe1, 0xb7, 0xa1, 0xac, 0x20, 0xd0, 0x9b, 0xc5, 0xe3, 0xa6, 0x38, - 0x98, 0xd7, 0xd6, 0x0f, 0xb6, 0x9e, 0xf3, 0x0b, 0x47, 0x0d, 0x60, 0xa3, 0x19, 0xb4, 0x73, 0xf8, - 0x43, 0x31, 0x4b, 0x78, 0xb8, 0x2a, 0x8f, 0x96, 0x26, 0x4f, 0xee, 0x95, 0xe4, 0x39, 0x85, 0xaa, - 0x50, 0x3f, 0xd3, 0x1e, 0x78, 0x13, 0x0a, 0x8c, 0x9f, 0xdc, 0x02, 0xf3, 0x09, 0xb0, 0xd2, 0x3b, - 0x39, 0x21, 0x9e, 0x81, 0xea, 0xbe, 0x6f, 0xfa, 0x03, 0x4f, 0x6e, 0x81, 0xdf, 0x6b, 0x50, 0x93, - 0x3d, 0x59, 0xcb, 0x2c, 0xf2, 0x7a, 0xca, 0x63, 0x5e, 0x70, 0x39, 0xbd, 0x04, 0x85, 0xce, 0xe1, - 0xbe, 0xf5, 0x89, 0x2c, 0x66, 0x89, 0x16, 0xed, 0xef, 0x72, 0x1c, 0x5e, 0x50, 0x16, 0x2d, 0x7a, - 0xd1, 0x71, 0xcd, 0x17, 0xfe, 0x96, 0xdd, 0x21, 0xa7, 0x2c, 0x9f, 0x98, 0x34, 0xc2, 0x0e, 0x76, - 0x37, 0x11, 0x85, 0x67, 0x96, 0x7f, 0xa9, 0x85, 0xe8, 0x59, 0xb8, 0xb0, 0x36, 0xf0, 0x8f, 0x9b, - 0xb6, 0x79, 0xd8, 0x95, 0x41, 0x00, 0xcf, 0x01, 0xa2, 0x9d, 0x1b, 0x96, 0xa7, 0xf6, 0x36, 0x61, - 0x96, 0xf6, 0x12, 0xdb, 0xb7, 0xda, 0x4a, 0xc4, 0x90, 0x61, 0x5b, 0x8b, 0x85, 0x6d, 0xd3, 0xf3, - 0x5e, 0x3a, 0x6e, 0x47, 0xa8, 0x16, 0xb4, 0xf1, 0x06, 0x67, 0xfe, 0xcc, 0x8b, 0x04, 0xe6, 0xaf, - 0xca, 0x65, 0x29, 0xe4, 0xf2, 0x98, 0xf8, 0x63, 0xb8, 0xe0, 0x07, 0x70, 0x51, 0x52, 0x8a, 0xfa, - 0xc5, 0x18, 0xe2, 0x3d, 0xb8, 0x26, 0x89, 0xd7, 0x8f, 0x69, 0x56, 0xfd, 0x54, 0x00, 0xfe, 0xa7, - 0x72, 0x3e, 0x82, 0x46, 0x20, 0x27, 0xcb, 0xb4, 0x9c, 0xae, 0x2a, 0xc0, 0xc0, 0x13, 0x7b, 0xa6, - 0x64, 0xb0, 0x6f, 0xda, 0xe7, 0x3a, 0xdd, 0xe0, 0x10, 0xa4, 0xdf, 0x78, 0x1d, 0xe6, 0x25, 0x0f, - 0x91, 0x03, 0x45, 0x99, 0x8c, 0x08, 0x94, 0xc4, 0x44, 0x18, 0x8c, 0x4e, 0x1d, 0x6f, 0x76, 0x95, - 0x32, 0x6a, 0x5a, 0xc6, 0x53, 0x53, 0x78, 0x5e, 0xe4, 0x3b, 0x82, 0x0a, 0xa6, 0x06, 0x6d, 0xd1, - 0x4d, 0x19, 0xa8, 0xdd, 0x62, 0x21, 0x68, 0xf7, 0xc8, 0x42, 0x8c, 0xb0, 0xfe, 0x08, 0x16, 0x02, - 0x21, 0xa8, 0xdd, 0x9e, 0x12, 0xb7, 0x67, 0x79, 0x9e, 0x72, 0xe3, 0x4e, 0x52, 0xfc, 0x0e, 0x4c, - 0xf6, 0x89, 0x88, 0x29, 0xe5, 0x55, 0xb4, 0xcc, 0x9f, 0x87, 0x96, 0x95, 0xc9, 
0x6c, 0x1c, 0x77, - 0xe0, 0xba, 0xe4, 0xce, 0x2d, 0x9a, 0xc8, 0x3e, 0x2e, 0x94, 0xbc, 0x8d, 0x71, 0xb3, 0x8e, 0xde, - 0xc6, 0xf2, 0x7c, 0xed, 0xe5, 0x6d, 0x8c, 0x9e, 0x15, 0xaa, 0x6f, 0x65, 0x3a, 0x2b, 0xb6, 0xb9, - 0x4d, 0x03, 0x97, 0xcc, 0xc4, 0xec, 0x10, 0xe6, 0xa2, 0x9e, 0x9c, 0x29, 0x8c, 0xcd, 0xc1, 0x94, - 0xef, 0x9c, 0x10, 0x19, 0xc4, 0x78, 0x43, 0x0a, 0x1c, 0xb8, 0x79, 0x26, 0x81, 0xcd, 0x90, 0x19, - 0xdb, 0x92, 0x59, 0xe5, 0xa5, 0xab, 0x29, 0xf3, 0x19, 0xde, 0xc0, 0xbb, 0x70, 0x29, 0x1e, 0x26, - 0x32, 0x89, 0xfc, 0x9c, 0x6f, 0xe0, 0xa4, 0x48, 0x92, 0x89, 0xef, 0xfb, 0x61, 0x30, 0x50, 0x02, - 0x4a, 0x26, 0x96, 0x06, 0xe8, 0x49, 0xf1, 0xe5, 0xbf, 0xb1, 0x5f, 0x83, 0x70, 0x93, 0x89, 0x99, - 0x17, 0x32, 0xcb, 0xbe, 0xfc, 0x61, 0x8c, 0xc8, 0x8f, 0x8d, 0x11, 0xc2, 0x49, 0xc2, 0x28, 0xf6, - 0x35, 0x6c, 0x3a, 0x81, 0x11, 0x06, 0xd0, 0xac, 0x18, 0xf4, 0x0c, 0x09, 0x30, 0x58, 0x43, 0x6e, - 0x6c, 0x35, 0xec, 0x66, 0x5a, 0x8c, 0x0f, 0xc2, 0xd8, 0x39, 0x12, 0x99, 0x33, 0x31, 0xfe, 0x10, - 0x16, 0xd3, 0x83, 0x72, 0x16, 0xce, 0xf7, 0x31, 0x94, 0x82, 0x84, 0x52, 0x79, 0x5a, 0x2d, 0x43, - 0x71, 0x77, 0x6f, 0xff, 0xe9, 0xda, 0x7a, 0xb3, 0xae, 0xad, 0xfe, 0x33, 0x0f, 0xb9, 0xed, 0xe7, - 0xe8, 0x5b, 0x30, 0xc5, 0x1f, 0x5e, 0xc6, 0xbc, 0x4b, 0xe9, 0xe3, 0x9e, 0x70, 0xf0, 0xd5, 0x4f, - 0xff, 0xf4, 0xd7, 0x2f, 0x72, 0x97, 0xf0, 0x85, 0x95, 0xe1, 0x5b, 0x66, 0xb7, 0x7f, 0x6c, 0xae, - 0x9c, 0x0c, 0x57, 0xd8, 0x99, 0xf0, 0x50, 0xbb, 0x8f, 0x9e, 0x43, 0xfe, 0xe9, 0xc0, 0x47, 0xa9, - 0x8f, 0x56, 0x7a, 0xfa, 0xd3, 0x0e, 0xd6, 0x19, 0xe7, 0x39, 0x3c, 0xa3, 0x72, 0xee, 0x0f, 0x7c, - 0xca, 0x77, 0x08, 0x65, 0xe5, 0x75, 0x06, 0x9d, 0xfb, 0x9c, 0xa5, 0x9f, 0xff, 0xf2, 0x83, 0x31, - 0xc3, 0xbb, 0x8a, 0x2f, 0xab, 0x78, 0xfc, 0x11, 0x49, 0xd5, 0xe7, 0xe0, 0xd4, 0x8e, 0xeb, 0x13, - 0x3e, 0x30, 0xc4, 0xf5, 0x51, 0x8a, 0xfa, 0xc9, 0xfa, 0xf8, 0xa7, 0x36, 0xe5, 0xeb, 0x88, 0x17, - 0xa5, 0xb6, 0x8f, 0xae, 0x27, 0xbc, 0x48, 0xa8, 0xb5, 0x77, 0x7d, 0x31, 0x9d, 0x40, 0x20, 0xdd, - 0x60, 0x48, 0x57, 0xf0, 0x25, 0x15, 0xa9, 0x1d, 0xd0, 0x3d, 0xd4, 0xee, 0xaf, 0x1e, 0xc3, 0x14, - 0xab, 0x18, 0xa2, 0x96, 0xfc, 0xd0, 0x13, 0x6a, 0x9d, 0x29, 0x3b, 0x20, 0x52, 0x6b, 0xc4, 0xf3, - 0x0c, 0x6d, 0x16, 0xd7, 0x02, 0x34, 0x56, 0x34, 0x7c, 0xa8, 0xdd, 0x5f, 0xd2, 0xde, 0xd0, 0x56, - 0xbf, 0x3f, 0x09, 0x53, 0xac, 0x52, 0x83, 0xfa, 0x00, 0x61, 0x0d, 0x2e, 0xae, 0xe7, 0x48, 0x55, - 0x2f, 0xae, 0xe7, 0x68, 0xf9, 0x0e, 0x5f, 0x67, 0xc8, 0xf3, 0x78, 0x2e, 0x40, 0x66, 0xaf, 0xe0, - 0x2b, 0xac, 0x26, 0x43, 0xcd, 0xfa, 0x12, 0xca, 0x4a, 0x2d, 0x0d, 0x25, 0x71, 0x8c, 0x14, 0xe3, - 0xe2, 0xdb, 0x24, 0xa1, 0x10, 0x87, 0x6f, 0x32, 0xd0, 0x6b, 0xb8, 0xa1, 0x1a, 0x97, 0xe3, 0xba, - 0x8c, 0x92, 0x02, 0x7f, 0xa6, 0x41, 0x2d, 0x5a, 0x4f, 0x43, 0x37, 0x13, 0x58, 0xc7, 0xcb, 0x72, - 0xfa, 0xad, 0xf1, 0x44, 0xa9, 0x22, 0x70, 0xfc, 0x13, 0x42, 0xfa, 0x26, 0xa5, 0x14, 0xb6, 0x47, - 0x3f, 0xd0, 0x60, 0x26, 0x56, 0x25, 0x43, 0x49, 0x10, 0x23, 0x35, 0x38, 0xfd, 0xf6, 0x39, 0x54, - 0x42, 0x92, 0xbb, 0x4c, 0x92, 0x1b, 0xf8, 0xea, 0xa8, 0x31, 0x7c, 0xab, 0x47, 0x7c, 0x47, 0x48, - 0xb3, 0xfa, 0xaf, 0x3c, 0x14, 0xd7, 0xf9, 0xaf, 0x8c, 0x90, 0x0f, 0xa5, 0xa0, 0xf2, 0x84, 0x16, - 0x92, 0xaa, 0x12, 0x61, 0xca, 0xae, 0x5f, 0x4f, 0x1d, 0x17, 0x22, 0xdc, 0x61, 0x22, 0x2c, 0xe2, - 0x2b, 0x81, 0x08, 0xe2, 0xd7, 0x4c, 0x2b, 0xfc, 0xf2, 0xbd, 0x62, 0x76, 0x3a, 0x74, 0x49, 0xbe, - 0xa7, 0x41, 0x45, 0x2d, 0x28, 0xa1, 0x1b, 0x89, 0xf5, 0x10, 0xb5, 0x26, 0xa5, 0xe3, 0x71, 0x24, - 0x02, 0xff, 0x1e, 0xc3, 0xbf, 0x89, 0x17, 0xd2, 0xf0, 0x5d, 0x46, 0x1f, 0x15, 0x81, 0x97, 0x90, - 0x92, 
0x45, 0x88, 0x54, 0xa8, 0x92, 0x45, 0x88, 0x56, 0xa0, 0xce, 0x17, 0x61, 0xc0, 0xe8, 0xa9, - 0x08, 0xa7, 0x00, 0x61, 0x85, 0x09, 0x25, 0x1a, 0x57, 0xb9, 0xc4, 0xc4, 0x7d, 0x70, 0xb4, 0x38, - 0x95, 0xb0, 0x03, 0x62, 0xd8, 0x5d, 0xcb, 0xa3, 0xbe, 0xb8, 0xfa, 0xdb, 0x49, 0x28, 0x3f, 0x31, - 0x2d, 0xdb, 0x27, 0xb6, 0x69, 0xb7, 0x09, 0x3a, 0x82, 0x29, 0x76, 0x4a, 0xc5, 0x03, 0x8f, 0x5a, - 0xf6, 0x89, 0x07, 0x9e, 0x48, 0x4d, 0x04, 0xdf, 0x66, 0xd0, 0xd7, 0xb1, 0x1e, 0x40, 0xf7, 0x42, - 0xfe, 0x2b, 0xac, 0x9e, 0x41, 0x55, 0x3e, 0x81, 0x02, 0xaf, 0x5f, 0xa0, 0x18, 0xb7, 0x48, 0x9d, - 0x43, 0xbf, 0x9a, 0x3c, 0x98, 0xba, 0xcb, 0x54, 0x2c, 0x8f, 0x11, 0x53, 0xb0, 0x6f, 0x03, 0x84, - 0x05, 0xb3, 0xb8, 0x7d, 0x47, 0xea, 0x6b, 0xfa, 0x62, 0x3a, 0x81, 0x00, 0xbe, 0xcf, 0x80, 0x6f, - 0xe1, 0xeb, 0x89, 0xc0, 0x9d, 0x60, 0x02, 0x05, 0x6f, 0xc3, 0xe4, 0xa6, 0xe9, 0x1d, 0xa3, 0xd8, - 0x21, 0xa4, 0xbc, 0x92, 0xea, 0x7a, 0xd2, 0x90, 0x80, 0xba, 0xc5, 0xa0, 0x16, 0xf0, 0x7c, 0x22, - 0xd4, 0xb1, 0xe9, 0xd1, 0x98, 0x8e, 0x06, 0x30, 0x2d, 0x5f, 0x3e, 0xd1, 0xb5, 0x98, 0xcd, 0xa2, - 0xaf, 0xa4, 0xfa, 0x42, 0xda, 0xb0, 0x00, 0x5c, 0x62, 0x80, 0x18, 0x5f, 0x4b, 0x36, 0xaa, 0x20, - 0x7f, 0xa8, 0xdd, 0x7f, 0x43, 0x5b, 0xfd, 0x51, 0x1d, 0x26, 0x69, 0xbe, 0x44, 0x4f, 0x91, 0xf0, - 0x9a, 0x19, 0xb7, 0xf0, 0x48, 0x71, 0x27, 0x6e, 0xe1, 0xd1, 0x1b, 0x6a, 0xc2, 0x29, 0xc2, 0x7e, - 0x6b, 0x49, 0x18, 0x15, 0xd5, 0xd8, 0x87, 0xb2, 0x72, 0x19, 0x45, 0x09, 0x1c, 0xa3, 0xa5, 0xa3, - 0xf8, 0x29, 0x92, 0x70, 0x93, 0xc5, 0x8b, 0x0c, 0x54, 0xc7, 0x17, 0xa3, 0xa0, 0x1d, 0x4e, 0x46, - 0x51, 0xbf, 0x03, 0x15, 0xf5, 0xd6, 0x8a, 0x12, 0x98, 0xc6, 0x6a, 0x53, 0xf1, 0x58, 0x91, 0x74, - 0xe9, 0x4d, 0x70, 0x9a, 0xe0, 0x97, 0xa5, 0x92, 0x96, 0xa2, 0x7f, 0x0c, 0x45, 0x71, 0x97, 0x4d, - 0xd2, 0x37, 0x5a, 0xcd, 0x4a, 0xd2, 0x37, 0x76, 0x11, 0x4e, 0x48, 0x49, 0x18, 0x2c, 0xcd, 0xd9, - 0x65, 0x80, 0x16, 0x90, 0x8f, 0x89, 0x9f, 0x06, 0x19, 0xd6, 0x67, 0xd2, 0x20, 0x95, 0xfb, 0xd2, - 0x58, 0xc8, 0x23, 0xe2, 0x8b, 0xbd, 0x2c, 0x2f, 0x23, 0x28, 0x85, 0xa3, 0x1a, 0x0d, 0xf1, 0x38, - 0x92, 0xd4, 0x2c, 0x32, 0x44, 0x15, 0xa1, 0x10, 0x7d, 0x17, 0x20, 0xbc, 0x78, 0xc7, 0x13, 0x83, - 0xc4, 0xea, 0x5d, 0x3c, 0x31, 0x48, 0xbe, 0xbb, 0x27, 0x78, 0x70, 0x08, 0xce, 0x33, 0x59, 0x0a, - 0xff, 0x13, 0x0d, 0xd0, 0xe8, 0x45, 0x1d, 0x3d, 0x48, 0x86, 0x48, 0x2c, 0x0c, 0xea, 0xaf, 0xbd, - 0x1a, 0x71, 0x6a, 0xf4, 0x0c, 0xe5, 0x6a, 0xb3, 0x29, 0xfd, 0x97, 0x54, 0xb2, 0xcf, 0x35, 0xa8, - 0x46, 0xae, 0xfa, 0xe8, 0x4e, 0xca, 0x3a, 0xc7, 0x8a, 0x8b, 0xfa, 0xdd, 0x73, 0xe9, 0x52, 0x73, - 0x27, 0x65, 0x57, 0xc8, 0xbc, 0xf1, 0x87, 0x1a, 0xd4, 0xa2, 0xf5, 0x01, 0x94, 0x02, 0x30, 0x52, - 0xa1, 0xd4, 0x97, 0xce, 0x27, 0x7c, 0x85, 0xd5, 0x0a, 0x53, 0xc9, 0x8f, 0xa1, 0x28, 0xca, 0x0a, - 0x49, 0x6e, 0x11, 0x2d, 0x70, 0x26, 0xb9, 0x45, 0xac, 0x26, 0x91, 0xe6, 0x16, 0xf4, 0x86, 0xae, - 0x78, 0xa2, 0x28, 0x3e, 0xa4, 0x41, 0x8e, 0xf7, 0xc4, 0x58, 0xe5, 0x62, 0x2c, 0x64, 0xe8, 0x89, - 0xb2, 0xf4, 0x80, 0x52, 0x38, 0x9e, 0xe3, 0x89, 0xf1, 0xca, 0x45, 0x9a, 0x27, 0x32, 0x54, 0xc5, - 0x13, 0xc3, 0x4a, 0x41, 0x92, 0x27, 0x8e, 0x94, 0x6f, 0x93, 0x3c, 0x71, 0xb4, 0xd8, 0x90, 0xb6, - 0xb6, 0x0c, 0x3c, 0xe2, 0x89, 0xb3, 0x09, 0x95, 0x05, 0xf4, 0x5a, 0x8a, 0x4d, 0x13, 0x4b, 0xc3, - 0xfa, 0xeb, 0xaf, 0x48, 0x3d, 0xde, 0x03, 0xf8, 0x6a, 0x48, 0x0f, 0xf8, 0x85, 0x06, 0x73, 0x49, - 0xa5, 0x09, 0x94, 0x02, 0x96, 0x52, 0x57, 0xd6, 0x97, 0x5f, 0x95, 0xfc, 0x15, 0xec, 0x16, 0xf8, - 0xc4, 0xa3, 0xfa, 0xef, 0xbe, 0x5c, 0xd0, 0xfe, 0xf8, 0xe5, 0x82, 0xf6, 0xe7, 0x2f, 0x17, 0xb4, - 0x9f, 0xfe, 0x65, 0x61, 0xe2, 
0xb0, 0xc0, 0xfe, 0xc3, 0xc3, 0x5b, 0xff, 0x0e, 0x00, 0x00, 0xff, - 0xff, 0x73, 0x7e, 0xb4, 0xb4, 0x77, 0x31, 0x00, 0x00, + // 3450 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x3b, 0x5b, 0x6f, 0x1b, 0xc7, + 0xb9, 0x5a, 0x5e, 0xc5, 0x8f, 0x17, 0xd1, 0x23, 0xd9, 0xa6, 0x68, 0x5b, 0x96, 0xc7, 0x37, 0xd9, + 0x4e, 0xa4, 0x44, 0xc9, 0x39, 0x0f, 0x3e, 0x41, 0x70, 0x64, 0x89, 0xb1, 0x74, 0x24, 0x4b, 0xce, + 0x4a, 0x76, 0x72, 0x80, 0xa0, 0xc4, 0x8a, 0x1c, 0x53, 0x0b, 0x91, 0xbb, 0xcc, 0xee, 0x92, 0x96, + 0xd2, 0x14, 0x28, 0xd2, 0x04, 0x45, 0x0b, 0xf4, 0xa5, 0x79, 0xe8, 0xed, 0xb1, 0x28, 0x8a, 0xfc, + 0x80, 0xbe, 0xf5, 0x07, 0x14, 0x7d, 0x69, 0x81, 0xfe, 0x81, 0x22, 0xed, 0x63, 0xdf, 0xfb, 0x54, + 0xb4, 0x98, 0xdb, 0xee, 0xec, 0x72, 0x97, 0x52, 0xca, 0x26, 0x2f, 0xd6, 0xce, 0x37, 0xdf, 0x7c, + 0xb7, 0x99, 0xef, 0x32, 0xdf, 0xd0, 0x50, 0x70, 0xfa, 0xad, 0xe5, 0xbe, 0x63, 0x7b, 0x36, 0x2a, + 0x11, 0xaf, 0xd5, 0x76, 0x89, 0x33, 0x24, 0x4e, 0xff, 0xb0, 0x3e, 0xd7, 0xb1, 0x3b, 0x36, 0x9b, + 0x58, 0xa1, 0x5f, 0x1c, 0xa7, 0x3e, 0x4f, 0x71, 0x56, 0x7a, 0xc3, 0x56, 0x8b, 0xfd, 0xd3, 0x3f, + 0x5c, 0x39, 0x1e, 0x8a, 0xa9, 0x2b, 0x6c, 0xca, 0x18, 0x78, 0x47, 0xec, 0x9f, 0xfe, 0x21, 0xfb, + 0x23, 0x26, 0xaf, 0x76, 0x6c, 0xbb, 0xd3, 0x25, 0x2b, 0x46, 0xdf, 0x5c, 0x31, 0x2c, 0xcb, 0xf6, + 0x0c, 0xcf, 0xb4, 0x2d, 0x97, 0xcf, 0xe2, 0xcf, 0x34, 0xa8, 0xe8, 0xc4, 0xed, 0xdb, 0x96, 0x4b, + 0x36, 0x89, 0xd1, 0x26, 0x0e, 0xba, 0x06, 0xd0, 0xea, 0x0e, 0x5c, 0x8f, 0x38, 0x4d, 0xb3, 0x5d, + 0xd3, 0x16, 0xb5, 0xa5, 0x8c, 0x5e, 0x10, 0x90, 0xad, 0x36, 0xba, 0x02, 0x85, 0x1e, 0xe9, 0x1d, + 0xf2, 0xd9, 0x14, 0x9b, 0x9d, 0xe6, 0x80, 0xad, 0x36, 0xaa, 0xc3, 0xb4, 0x43, 0x86, 0xa6, 0x6b, + 0xda, 0x56, 0x2d, 0xbd, 0xa8, 0x2d, 0xa5, 0x75, 0x7f, 0x4c, 0x17, 0x3a, 0xc6, 0x0b, 0xaf, 0xe9, + 0x11, 0xa7, 0x57, 0xcb, 0xf0, 0x85, 0x14, 0x70, 0x40, 0x9c, 0x1e, 0xfe, 0x34, 0x0b, 0x25, 0xdd, + 0xb0, 0x3a, 0x44, 0x27, 0x1f, 0x0e, 0x88, 0xeb, 0xa1, 0x2a, 0xa4, 0x8f, 0xc9, 0x29, 0x63, 0x5f, + 0xd2, 0xe9, 0x27, 0x5f, 0x6f, 0x75, 0x48, 0x93, 0x58, 0x9c, 0x71, 0x89, 0xae, 0xb7, 0x3a, 0xa4, + 0x61, 0xb5, 0xd1, 0x1c, 0x64, 0xbb, 0x66, 0xcf, 0xf4, 0x04, 0x57, 0x3e, 0x08, 0x89, 0x93, 0x89, + 0x88, 0xb3, 0x0e, 0xe0, 0xda, 0x8e, 0xd7, 0xb4, 0x9d, 0x36, 0x71, 0x6a, 0xd9, 0x45, 0x6d, 0xa9, + 0xb2, 0x7a, 0x6b, 0x59, 0xdd, 0x88, 0x65, 0x55, 0xa0, 0xe5, 0x7d, 0xdb, 0xf1, 0xf6, 0x28, 0xae, + 0x5e, 0x70, 0xe5, 0x27, 0x7a, 0x07, 0x8a, 0x8c, 0x88, 0x67, 0x38, 0x1d, 0xe2, 0xd5, 0x72, 0x8c, + 0xca, 0xed, 0x33, 0xa8, 0x1c, 0x30, 0x64, 0x9d, 0xb1, 0xe7, 0xdf, 0x08, 0x43, 0xc9, 0x25, 0x8e, + 0x69, 0x74, 0xcd, 0x8f, 0x8c, 0xc3, 0x2e, 0xa9, 0xe5, 0x17, 0xb5, 0xa5, 0x69, 0x3d, 0x04, 0xa3, + 0xfa, 0x1f, 0x93, 0x53, 0xb7, 0x69, 0x5b, 0xdd, 0xd3, 0xda, 0x34, 0x43, 0x98, 0xa6, 0x80, 0x3d, + 0xab, 0x7b, 0xca, 0x36, 0xcd, 0x1e, 0x58, 0x1e, 0x9f, 0x2d, 0xb0, 0xd9, 0x02, 0x83, 0xb0, 0xe9, + 0x25, 0xa8, 0xf6, 0x4c, 0xab, 0xd9, 0xb3, 0xdb, 0x4d, 0xdf, 0x20, 0xc0, 0x0c, 0x52, 0xe9, 0x99, + 0xd6, 0x13, 0xbb, 0xad, 0x4b, 0xb3, 0x50, 0x4c, 0xe3, 0x24, 0x8c, 0x59, 0x14, 0x98, 0xc6, 0x89, + 0x8a, 0xb9, 0x0c, 0xb3, 0x94, 0x66, 0xcb, 0x21, 0x86, 0x47, 0x02, 0xe4, 0x12, 0x43, 0xbe, 0xd0, + 0x33, 0xad, 0x75, 0x36, 0x13, 0xc2, 0x37, 0x4e, 0x46, 0xf0, 0xcb, 0x02, 0xdf, 0x38, 0x09, 0xe3, + 0xe3, 0x65, 0x28, 0xf8, 0x36, 0x47, 0xd3, 0x90, 0xd9, 0xdd, 0xdb, 0x6d, 0x54, 0xa7, 0x10, 0x40, + 0x6e, 0x6d, 0x7f, 0xbd, 0xb1, 0xbb, 0x51, 0xd5, 0x50, 0x11, 0xf2, 0x1b, 0x0d, 0x3e, 0x48, 0xe1, + 0x47, 0x00, 0x81, 0x75, 0x51, 0x1e, 0xd2, 0xdb, 
0x8d, 0xff, 0xaf, 0x4e, 0x51, 0x9c, 0xe7, 0x0d, + 0x7d, 0x7f, 0x6b, 0x6f, 0xb7, 0xaa, 0xd1, 0xc5, 0xeb, 0x7a, 0x63, 0xed, 0xa0, 0x51, 0x4d, 0x51, + 0x8c, 0x27, 0x7b, 0x1b, 0xd5, 0x34, 0x2a, 0x40, 0xf6, 0xf9, 0xda, 0xce, 0xb3, 0x46, 0x35, 0x83, + 0x3f, 0xd7, 0xa0, 0x2c, 0xf6, 0x8b, 0xfb, 0x04, 0x7a, 0x13, 0x72, 0x47, 0xcc, 0x2f, 0xd8, 0x51, + 0x2c, 0xae, 0x5e, 0x8d, 0x6c, 0x6e, 0xc8, 0x77, 0x74, 0x81, 0x8b, 0x30, 0xa4, 0x8f, 0x87, 0x6e, + 0x2d, 0xb5, 0x98, 0x5e, 0x2a, 0xae, 0x56, 0x97, 0xb9, 0xc3, 0x2e, 0x6f, 0x93, 0xd3, 0xe7, 0x46, + 0x77, 0x40, 0x74, 0x3a, 0x89, 0x10, 0x64, 0x7a, 0xb6, 0x43, 0xd8, 0x89, 0x9d, 0xd6, 0xd9, 0x37, + 0x3d, 0xc6, 0x6c, 0xd3, 0xc4, 0x69, 0xe5, 0x03, 0xfc, 0x85, 0x06, 0xf0, 0x74, 0xe0, 0x25, 0xbb, + 0xc6, 0x1c, 0x64, 0x87, 0x94, 0xb0, 0x70, 0x0b, 0x3e, 0x60, 0x3e, 0x41, 0x0c, 0x97, 0xf8, 0x3e, + 0x41, 0x07, 0xe8, 0x32, 0xe4, 0xfb, 0x0e, 0x19, 0x36, 0x8f, 0x87, 0x8c, 0xc9, 0xb4, 0x9e, 0xa3, + 0xc3, 0xed, 0x21, 0xba, 0x01, 0x25, 0xb3, 0x63, 0xd9, 0x0e, 0x69, 0x72, 0x5a, 0x59, 0x36, 0x5b, + 0xe4, 0x30, 0x26, 0xb7, 0x82, 0xc2, 0x09, 0xe7, 0x54, 0x94, 0x1d, 0x0a, 0xc2, 0x16, 0x14, 0x99, + 0xa8, 0x13, 0x99, 0xef, 0x5e, 0x20, 0x63, 0x8a, 0x2d, 0x1b, 0x35, 0xa1, 0x90, 0x1a, 0x7f, 0x00, + 0x68, 0x83, 0x74, 0x89, 0x47, 0x26, 0x89, 0x1e, 0x8a, 0x4d, 0xd2, 0xaa, 0x4d, 0xf0, 0x8f, 0x35, + 0x98, 0x0d, 0x91, 0x9f, 0x48, 0xad, 0x1a, 0xe4, 0xdb, 0x8c, 0x18, 0x97, 0x20, 0xad, 0xcb, 0x21, + 0x7a, 0x00, 0xd3, 0x42, 0x00, 0xb7, 0x96, 0x4e, 0x38, 0x34, 0x79, 0x2e, 0x93, 0x8b, 0xff, 0xa6, + 0x41, 0x41, 0x28, 0xba, 0xd7, 0x47, 0x6b, 0x50, 0x76, 0xf8, 0xa0, 0xc9, 0xf4, 0x11, 0x12, 0xd5, + 0x93, 0x83, 0xd0, 0xe6, 0x94, 0x5e, 0x12, 0x4b, 0x18, 0x18, 0xfd, 0x0f, 0x14, 0x25, 0x89, 0xfe, + 0xc0, 0x13, 0x26, 0xaf, 0x85, 0x09, 0x04, 0xe7, 0x6f, 0x73, 0x4a, 0x07, 0x81, 0xfe, 0x74, 0xe0, + 0xa1, 0x03, 0x98, 0x93, 0x8b, 0xb9, 0x36, 0x42, 0x8c, 0x34, 0xa3, 0xb2, 0x18, 0xa6, 0x32, 0xba, + 0x55, 0x9b, 0x53, 0x3a, 0x12, 0xeb, 0x95, 0xc9, 0x47, 0x05, 0xc8, 0x0b, 0x28, 0xfe, 0xbb, 0x06, + 0x20, 0x0d, 0xba, 0xd7, 0x47, 0x1b, 0x50, 0x71, 0xc4, 0x28, 0xa4, 0xf0, 0x95, 0x58, 0x85, 0xc5, + 0x3e, 0x4c, 0xe9, 0x65, 0xb9, 0x88, 0xab, 0xfc, 0x36, 0x94, 0x7c, 0x2a, 0x81, 0xce, 0xf3, 0x31, + 0x3a, 0xfb, 0x14, 0x8a, 0x72, 0x01, 0xd5, 0xfa, 0x3d, 0xb8, 0xe8, 0xaf, 0x8f, 0x51, 0xfb, 0xc6, + 0x18, 0xb5, 0x7d, 0x82, 0xb3, 0x92, 0x82, 0xaa, 0x38, 0xd0, 0x94, 0xc5, 0xc1, 0xf8, 0x8b, 0x34, + 0xe4, 0xd7, 0xed, 0x5e, 0xdf, 0x70, 0xe8, 0x1e, 0xe5, 0x1c, 0xe2, 0x0e, 0xba, 0x1e, 0x53, 0xb7, + 0xb2, 0x7a, 0x33, 0xcc, 0x41, 0xa0, 0xc9, 0xbf, 0x3a, 0x43, 0xd5, 0xc5, 0x12, 0xba, 0x58, 0x64, + 0xa8, 0xd4, 0x39, 0x16, 0x8b, 0xfc, 0x24, 0x96, 0x48, 0x5f, 0x4a, 0x07, 0xbe, 0x54, 0x87, 0xfc, + 0x90, 0x38, 0x41, 0x56, 0xdd, 0x9c, 0xd2, 0x25, 0x00, 0xdd, 0x83, 0x99, 0x68, 0x84, 0xcf, 0x0a, + 0x9c, 0x4a, 0x2b, 0x9c, 0x10, 0x6e, 0x42, 0x29, 0x94, 0x66, 0x72, 0x02, 0xaf, 0xd8, 0x53, 0xb2, + 0xcc, 0x25, 0x19, 0xda, 0x68, 0x4a, 0x2c, 0x6d, 0x4e, 0x89, 0xe0, 0x86, 0xff, 0x17, 0xca, 0x21, + 0x5d, 0x69, 0x14, 0x6f, 0xbc, 0xfb, 0x6c, 0x6d, 0x87, 0x87, 0xfc, 0xc7, 0x2c, 0xca, 0xeb, 0x55, + 0x8d, 0x66, 0x8e, 0x9d, 0xc6, 0xfe, 0x7e, 0x35, 0x85, 0xca, 0x50, 0xd8, 0xdd, 0x3b, 0x68, 0x72, + 0xac, 0x34, 0x7e, 0xcb, 0xa7, 0x20, 0x52, 0x86, 0x92, 0x29, 0xa6, 0x94, 0x4c, 0xa1, 0xc9, 0x4c, + 0x91, 0x0a, 0x32, 0x45, 0xfa, 0x51, 0x05, 0x4a, 0xdc, 0x3e, 0xcd, 0x81, 0x45, 0xb3, 0xd5, 0x2f, + 0x35, 0x80, 0x83, 0x13, 0x4b, 0x06, 0xa0, 0x15, 0xc8, 0xb7, 0x38, 0xf1, 0x9a, 0xc6, 0xfc, 0xf9, + 0x62, 0xac, 0xc9, 0x75, 0x89, 0x85, 0x5e, 0x87, 0xbc, 0x3b, 0x68, 0xb5, 
0x88, 0x2b, 0xb3, 0xc6, + 0xe5, 0x68, 0x48, 0x11, 0x0e, 0xaf, 0x4b, 0x3c, 0xba, 0xe4, 0x85, 0x61, 0x76, 0x07, 0x2c, 0x87, + 0x8c, 0x5f, 0x22, 0xf0, 0xf0, 0xcf, 0x34, 0x28, 0x32, 0x29, 0x27, 0x8a, 0x63, 0x57, 0xa1, 0xc0, + 0x64, 0x20, 0x6d, 0x11, 0xc9, 0xa6, 0xf5, 0x00, 0x80, 0xfe, 0x1b, 0x0a, 0xf2, 0x04, 0xcb, 0x60, + 0x56, 0x8b, 0x27, 0xbb, 0xd7, 0xd7, 0x03, 0x54, 0xbc, 0x0d, 0x17, 0x98, 0x55, 0x5a, 0xb4, 0x3e, + 0x95, 0x76, 0x54, 0x2b, 0x38, 0x2d, 0x52, 0xc1, 0xd5, 0x61, 0xba, 0x7f, 0x74, 0xea, 0x9a, 0x2d, + 0xa3, 0x2b, 0xa4, 0xf0, 0xc7, 0xf8, 0xff, 0x00, 0xa9, 0xc4, 0x26, 0x51, 0x17, 0x97, 0xa1, 0xb8, + 0x69, 0xb8, 0x47, 0x42, 0x24, 0xfc, 0x3e, 0x94, 0xf8, 0x70, 0x22, 0x1b, 0x22, 0xc8, 0x1c, 0x19, + 0xee, 0x11, 0x13, 0xbc, 0xac, 0xb3, 0x6f, 0x7c, 0x01, 0x66, 0xf6, 0x2d, 0xa3, 0xef, 0x1e, 0xd9, + 0x32, 0xd6, 0xd2, 0xfa, 0xbc, 0x1a, 0xc0, 0x26, 0xe2, 0x78, 0x17, 0x66, 0x1c, 0xd2, 0x33, 0x4c, + 0xcb, 0xb4, 0x3a, 0xcd, 0xc3, 0x53, 0x8f, 0xb8, 0xa2, 0x7c, 0xaf, 0xf8, 0xe0, 0x47, 0x14, 0x4a, + 0x45, 0x3b, 0xec, 0xda, 0x87, 0xc2, 0xe3, 0xd9, 0x37, 0xfe, 0x8d, 0x06, 0xa5, 0xf7, 0x0c, 0xaf, + 0x25, 0xad, 0x80, 0xb6, 0xa0, 0xe2, 0xfb, 0x39, 0x83, 0x08, 0x59, 0x22, 0x01, 0x9f, 0xad, 0x91, + 0x85, 0x9d, 0x0c, 0xf8, 0xe5, 0x96, 0x0a, 0x60, 0xa4, 0x0c, 0xab, 0x45, 0xba, 0x3e, 0xa9, 0x54, + 0x32, 0x29, 0x86, 0xa8, 0x92, 0x52, 0x01, 0x8f, 0x66, 0x82, 0x64, 0xc8, 0xdd, 0xf2, 0xe7, 0x29, + 0x40, 0xa3, 0x32, 0x7c, 0xd5, 0xfa, 0xe0, 0x36, 0x54, 0x5c, 0xcf, 0x70, 0xbc, 0x66, 0xe4, 0x72, + 0x53, 0x66, 0x50, 0x3f, 0x56, 0xdd, 0x85, 0x99, 0xbe, 0x63, 0x77, 0x1c, 0xe2, 0xba, 0x4d, 0xcb, + 0xf6, 0xcc, 0x17, 0xa7, 0xa2, 0xc4, 0xaa, 0x48, 0xf0, 0x2e, 0x83, 0xa2, 0x06, 0xe4, 0x5f, 0x98, + 0x5d, 0x8f, 0x38, 0x6e, 0x2d, 0xbb, 0x98, 0x5e, 0xaa, 0xac, 0x3e, 0x38, 0xcb, 0x6a, 0xcb, 0xef, + 0x30, 0xfc, 0x83, 0xd3, 0x3e, 0xd1, 0xe5, 0x5a, 0xb5, 0x6c, 0xc9, 0x85, 0xca, 0x96, 0xdb, 0x00, + 0x01, 0x3e, 0x8d, 0x5a, 0xbb, 0x7b, 0x4f, 0x9f, 0x1d, 0x54, 0xa7, 0x50, 0x09, 0xa6, 0x77, 0xf7, + 0x36, 0x1a, 0x3b, 0x0d, 0x1a, 0xd7, 0xf0, 0x8a, 0xb4, 0x8d, 0x6a, 0x43, 0x34, 0x0f, 0xd3, 0x2f, + 0x29, 0x54, 0xde, 0xfe, 0xd2, 0x7a, 0x9e, 0x8d, 0xb7, 0xda, 0xf8, 0x47, 0x29, 0x28, 0x8b, 0x53, + 0x30, 0xd1, 0x51, 0x54, 0x59, 0xa4, 0x42, 0x2c, 0x68, 0x8d, 0xc4, 0x4f, 0x47, 0x5b, 0x94, 0x62, + 0x72, 0x48, 0xdd, 0x9d, 0x6f, 0x36, 0x69, 0x0b, 0xb3, 0xfa, 0x63, 0x74, 0x0f, 0xaa, 0x2d, 0xee, + 0xee, 0x91, 0xb4, 0xa3, 0xcf, 0x08, 0xb8, 0x92, 0x75, 0xca, 0xfe, 0x69, 0x33, 0x5c, 0x91, 0x76, + 0x0a, 0x7a, 0x49, 0x1e, 0x24, 0x0a, 0x43, 0xb7, 0x21, 0x47, 0x86, 0xc4, 0xf2, 0xdc, 0x5a, 0x91, + 0x05, 0xb0, 0xb2, 0xac, 0xc6, 0x1a, 0x14, 0xaa, 0x8b, 0x49, 0xfc, 0x5f, 0x70, 0x81, 0x55, 0xbd, + 0x8f, 0x1d, 0xc3, 0x52, 0xcb, 0xf3, 0x83, 0x83, 0x1d, 0x61, 0x3a, 0xfa, 0x89, 0x2a, 0x90, 0xda, + 0xda, 0x10, 0x8a, 0xa6, 0xb6, 0x36, 0xf0, 0x27, 0x1a, 0x20, 0x75, 0xdd, 0x44, 0xb6, 0x8c, 0x10, + 0x97, 0xec, 0xd3, 0x01, 0xfb, 0x39, 0xc8, 0x12, 0xc7, 0xb1, 0x1d, 0x66, 0xb5, 0x82, 0xce, 0x07, + 0xf8, 0x96, 0x90, 0x41, 0x27, 0x43, 0xfb, 0xd8, 0x77, 0x0c, 0x4e, 0x4d, 0xf3, 0x45, 0xdd, 0x86, + 0xd9, 0x10, 0xd6, 0x44, 0x81, 0xf4, 0x2e, 0x5c, 0x64, 0xc4, 0xb6, 0x09, 0xe9, 0xaf, 0x75, 0xcd, + 0x61, 0x22, 0xd7, 0x3e, 0x5c, 0x8a, 0x22, 0x7e, 0xbd, 0x36, 0xc2, 0x6f, 0x09, 0x8e, 0x07, 0x66, + 0x8f, 0x1c, 0xd8, 0x3b, 0xc9, 0xb2, 0xd1, 0xe8, 0x48, 0x6f, 0xdd, 0x22, 0xe3, 0xb0, 0x6f, 0xfc, + 0x2b, 0x0d, 0x2e, 0x8f, 0x2c, 0xff, 0x9a, 0x77, 0x75, 0x01, 0xa0, 0x43, 0x8f, 0x0f, 0x69, 0xd3, + 0x09, 0x7e, 0x5f, 0x54, 0x20, 0xbe, 0x9c, 0x34, 0xc0, 0x94, 0x84, 0x9c, 0x47, 0x90, 0x7b, 0xc2, + 
0x5a, 0x35, 0x8a, 0x56, 0x19, 0xa9, 0x95, 0x65, 0xf4, 0xf8, 0x05, 0xb2, 0xa0, 0xb3, 0x6f, 0x96, + 0x5f, 0x09, 0x71, 0x9e, 0xe9, 0x3b, 0x3c, 0x8f, 0x17, 0x74, 0x7f, 0x4c, 0xb9, 0xb7, 0xba, 0x26, + 0xb1, 0x3c, 0x36, 0x9b, 0x61, 0xb3, 0x0a, 0x04, 0x2f, 0x43, 0x95, 0x73, 0x5a, 0x6b, 0xb7, 0x95, + 0x5c, 0xee, 0xd3, 0xd3, 0xc2, 0xf4, 0xf0, 0xaf, 0x35, 0xb8, 0xa0, 0x2c, 0x98, 0xc8, 0x76, 0xaf, + 0x40, 0x8e, 0x37, 0xa4, 0x44, 0x1e, 0x99, 0x0b, 0xaf, 0xe2, 0x6c, 0x74, 0x81, 0x83, 0x96, 0x21, + 0xcf, 0xbf, 0x64, 0xb1, 0x12, 0x8f, 0x2e, 0x91, 0xf0, 0x6d, 0x98, 0x15, 0x20, 0xd2, 0xb3, 0xe3, + 0x8e, 0x09, 0x33, 0x28, 0xfe, 0x18, 0xe6, 0xc2, 0x68, 0x13, 0xa9, 0xa4, 0x08, 0x99, 0x3a, 0x8f, + 0x90, 0x6b, 0x52, 0xc8, 0x67, 0xfd, 0xb6, 0x92, 0xf6, 0xa2, 0xbb, 0xae, 0xee, 0x48, 0x2a, 0xb2, + 0x23, 0xbe, 0x02, 0x92, 0xc4, 0x37, 0xaa, 0xc0, 0xac, 0x3c, 0x0e, 0x3b, 0xa6, 0xeb, 0x17, 0x43, + 0x1f, 0x01, 0x52, 0x81, 0xdf, 0xb4, 0x40, 0x1b, 0xe4, 0x85, 0x63, 0x74, 0x7a, 0xc4, 0x0f, 0xf5, + 0xb4, 0xca, 0x54, 0x81, 0x13, 0x05, 0xc7, 0x3f, 0x68, 0x50, 0x5a, 0xeb, 0x1a, 0x4e, 0x4f, 0x6e, + 0xd6, 0xdb, 0x90, 0xe3, 0xe5, 0xab, 0xb8, 0xf1, 0xdd, 0x09, 0x93, 0x51, 0x71, 0xf9, 0x60, 0x8d, + 0x17, 0xbb, 0x62, 0x15, 0xdd, 0x5c, 0xd1, 0x97, 0xdd, 0x88, 0xf4, 0x69, 0x37, 0xd0, 0xab, 0x90, + 0x35, 0xe8, 0x12, 0x16, 0x50, 0x2a, 0xd1, 0x8b, 0x03, 0xa3, 0xc6, 0x4a, 0x0d, 0x8e, 0x85, 0xdf, + 0x84, 0xa2, 0xc2, 0x81, 0xde, 0x87, 0x1e, 0x37, 0x44, 0x39, 0xb1, 0xb6, 0x7e, 0xb0, 0xf5, 0x9c, + 0x5f, 0x93, 0x2a, 0x00, 0x1b, 0x0d, 0x7f, 0x9c, 0xc2, 0xef, 0x8b, 0x55, 0x22, 0xe4, 0xa8, 0xf2, + 0x68, 0x49, 0xf2, 0xa4, 0xce, 0x25, 0xcf, 0x09, 0x94, 0x85, 0xfa, 0x13, 0x9d, 0x81, 0xd7, 0x21, + 0xc7, 0xe8, 0xc9, 0x23, 0x30, 0x1f, 0xc3, 0x56, 0x46, 0x0b, 0x8e, 0x88, 0x67, 0xa0, 0xbc, 0xef, + 0x19, 0xde, 0xc0, 0x95, 0x47, 0xe0, 0xf7, 0x1a, 0x54, 0x24, 0x64, 0xd2, 0xe6, 0x90, 0xbc, 0x54, + 0xf3, 0x20, 0xec, 0x5f, 0xa9, 0x2f, 0x41, 0xae, 0x7d, 0xb8, 0x6f, 0x7e, 0x24, 0x1b, 0x79, 0x62, + 0x44, 0xe1, 0x5d, 0xce, 0x87, 0x77, 0xd3, 0xc5, 0x88, 0x5e, 0xcf, 0x1c, 0xe3, 0x85, 0xb7, 0x65, + 0xb5, 0xc9, 0x09, 0xab, 0x82, 0x32, 0x7a, 0x00, 0x60, 0x37, 0x2a, 0xd1, 0x75, 0x67, 0xa5, 0x8f, + 0xda, 0x85, 0x9f, 0x85, 0x0b, 0x6b, 0x03, 0xef, 0xa8, 0x61, 0x19, 0x87, 0x5d, 0x19, 0x34, 0xf0, + 0x1c, 0x20, 0x0a, 0xdc, 0x30, 0x5d, 0x15, 0xda, 0x80, 0x59, 0x0a, 0x25, 0x96, 0x67, 0xb6, 0x94, + 0x08, 0x23, 0xf3, 0x88, 0x16, 0xc9, 0x23, 0x86, 0xeb, 0xbe, 0xb4, 0x9d, 0xb6, 0x50, 0xcd, 0x1f, + 0xe3, 0x0d, 0x4e, 0xfc, 0x99, 0x1b, 0xca, 0x14, 0x5f, 0x95, 0xca, 0x52, 0x40, 0xe5, 0x31, 0xf1, + 0xc6, 0x50, 0xc1, 0x0f, 0xe0, 0xa2, 0xc4, 0x14, 0x5d, 0x97, 0x31, 0xc8, 0x7b, 0x70, 0x4d, 0x22, + 0xaf, 0x1f, 0xd1, 0xbb, 0xc0, 0x53, 0xc1, 0xf0, 0xdf, 0x95, 0xf3, 0x11, 0xd4, 0x7c, 0x39, 0x59, + 0xe9, 0x67, 0x77, 0x55, 0x01, 0x06, 0xae, 0x38, 0x33, 0x05, 0x9d, 0x7d, 0x53, 0x98, 0x63, 0x77, + 0xfd, 0xac, 0x4c, 0xbf, 0xf1, 0x3a, 0xcc, 0x4b, 0x1a, 0xa2, 0x28, 0x0b, 0x13, 0x19, 0x11, 0x28, + 0x8e, 0x88, 0x30, 0x18, 0x5d, 0x3a, 0xde, 0xec, 0x2a, 0x66, 0xd8, 0xb4, 0x8c, 0xa6, 0xa6, 0xd0, + 0xbc, 0xc8, 0x4f, 0x04, 0x15, 0x4c, 0x0d, 0xda, 0x02, 0x4c, 0x09, 0xa8, 0x60, 0xb1, 0x11, 0x14, + 0x3c, 0xb2, 0x11, 0x23, 0xa4, 0x3f, 0x80, 0x05, 0x5f, 0x08, 0x6a, 0xb7, 0xa7, 0xc4, 0xe9, 0x99, + 0xae, 0xab, 0xf4, 0x09, 0xe2, 0x14, 0xbf, 0x03, 0x99, 0x3e, 0x11, 0x31, 0xa5, 0xb8, 0x8a, 0x96, + 0xf9, 0xdb, 0xd8, 0xb2, 0xb2, 0x98, 0xcd, 0xe3, 0x36, 0x5c, 0x97, 0xd4, 0xb9, 0x45, 0x63, 0xc9, + 0x47, 0x85, 0x92, 0x77, 0x48, 0x6e, 0xd6, 0xd1, 0x3b, 0x64, 0x9a, 0xef, 0xbd, 0xbc, 0x43, 0xd2, + 0x5c, 0xa1, 0xfa, 0xd6, 
0x44, 0xb9, 0x62, 0x9b, 0xdb, 0xd4, 0x77, 0xc9, 0x89, 0x88, 0x1d, 0xc2, + 0x5c, 0xd8, 0x93, 0x27, 0x0a, 0x63, 0x73, 0x90, 0xf5, 0xec, 0x63, 0x22, 0x83, 0x18, 0x1f, 0x48, + 0x81, 0x7d, 0x37, 0x9f, 0x48, 0x60, 0x23, 0x20, 0xc6, 0x8e, 0xe4, 0xa4, 0xf2, 0xd2, 0xdd, 0x94, + 0xf5, 0x0f, 0x1f, 0xe0, 0x5d, 0xb8, 0x14, 0x0d, 0x13, 0x13, 0x89, 0xfc, 0x9c, 0x1f, 0xe0, 0xb8, + 0x48, 0x32, 0x11, 0xdd, 0x77, 0x83, 0x60, 0xa0, 0x04, 0x94, 0x89, 0x48, 0xea, 0x50, 0x8f, 0x8b, + 0x2f, 0xff, 0x89, 0xf3, 0xea, 0x87, 0x9b, 0x89, 0x88, 0xb9, 0x01, 0xb1, 0xc9, 0xb7, 0x3f, 0x88, + 0x11, 0xe9, 0xb1, 0x31, 0x42, 0x38, 0x49, 0x10, 0xc5, 0xbe, 0x86, 0x43, 0x27, 0x78, 0x04, 0x01, + 0x74, 0x52, 0x1e, 0x34, 0x87, 0xf8, 0x3c, 0xd8, 0x40, 0x1e, 0x6c, 0x35, 0xec, 0x4e, 0xb4, 0x19, + 0xef, 0x05, 0xb1, 0x73, 0x24, 0x32, 0x4f, 0x44, 0xf8, 0x7d, 0x58, 0x4c, 0x0e, 0xca, 0x93, 0x50, + 0xbe, 0x8f, 0xa1, 0xe0, 0x17, 0x94, 0xca, 0xbb, 0x72, 0x11, 0xf2, 0xbb, 0x7b, 0xfb, 0x4f, 0xd7, + 0xd6, 0x1b, 0x55, 0x6d, 0xf5, 0x1f, 0x69, 0x48, 0x6d, 0x3f, 0x47, 0xdf, 0x82, 0x2c, 0x7f, 0x2e, + 0x1a, 0xf3, 0x9a, 0x56, 0x1f, 0xf7, 0xf0, 0x84, 0xaf, 0x7e, 0xf2, 0xa7, 0xbf, 0x7e, 0x9e, 0xba, + 0x84, 0x2f, 0xac, 0x0c, 0xdf, 0x30, 0xba, 0xfd, 0x23, 0x63, 0xe5, 0x78, 0xb8, 0xc2, 0x72, 0xc2, + 0x43, 0xed, 0x3e, 0x7a, 0x0e, 0xe9, 0xa7, 0x03, 0x0f, 0x25, 0x3e, 0xb5, 0xd5, 0x93, 0x1f, 0xa4, + 0x70, 0x9d, 0x51, 0x9e, 0xc3, 0x33, 0x2a, 0xe5, 0xfe, 0xc0, 0xa3, 0x74, 0x87, 0x50, 0x54, 0xde, + 0x94, 0xd0, 0x99, 0x8f, 0x70, 0xf5, 0xb3, 0xdf, 0xab, 0x30, 0x66, 0xfc, 0xae, 0xe2, 0xcb, 0x2a, + 0x3f, 0xfe, 0xf4, 0xa5, 0xea, 0x73, 0x70, 0x62, 0x45, 0xf5, 0x09, 0x9e, 0x45, 0xa2, 0xfa, 0x28, + 0x4f, 0x11, 0xf1, 0xfa, 0x78, 0x27, 0x16, 0xa5, 0x6b, 0x8b, 0x77, 0xb0, 0x96, 0x87, 0xae, 0xc7, + 0xbc, 0xa3, 0xa8, 0x2f, 0x06, 0xf5, 0xc5, 0x64, 0x04, 0xc1, 0xe9, 0x06, 0xe3, 0x74, 0x05, 0x5f, + 0x52, 0x39, 0xb5, 0x7c, 0xbc, 0x87, 0xda, 0xfd, 0xd5, 0x23, 0xc8, 0xb2, 0x3e, 0x27, 0x6a, 0xca, + 0x8f, 0x7a, 0x4c, 0x87, 0x36, 0xe1, 0x04, 0x84, 0x3a, 0xa4, 0x78, 0x9e, 0x71, 0x9b, 0xc5, 0x15, + 0x9f, 0x1b, 0x6b, 0x75, 0x3e, 0xd4, 0xee, 0x2f, 0x69, 0xaf, 0x69, 0xab, 0xdf, 0xcb, 0x40, 0x96, + 0xb5, 0x8e, 0x50, 0x1f, 0x20, 0x68, 0x0a, 0x46, 0xf5, 0x1c, 0x69, 0x33, 0x46, 0xf5, 0x1c, 0xed, + 0x27, 0xe2, 0xeb, 0x8c, 0xf3, 0x3c, 0x9e, 0xf3, 0x39, 0xb3, 0x57, 0xfb, 0x15, 0xd6, 0x24, 0xa2, + 0x66, 0x7d, 0x09, 0x45, 0xa5, 0xb9, 0x87, 0xe2, 0x28, 0x86, 0xba, 0x83, 0xd1, 0x63, 0x12, 0xd3, + 0x19, 0xc4, 0x37, 0x19, 0xd3, 0x6b, 0xb8, 0xa6, 0x1a, 0x97, 0xf3, 0x75, 0x18, 0x26, 0x65, 0xfc, + 0xa9, 0x06, 0x95, 0x70, 0x83, 0x0f, 0xdd, 0x8c, 0x21, 0x1d, 0xed, 0x13, 0xd6, 0x6f, 0x8d, 0x47, + 0x4a, 0x14, 0x81, 0xf3, 0x3f, 0x26, 0xa4, 0x6f, 0x50, 0x4c, 0x61, 0x7b, 0xf4, 0x7d, 0x0d, 0x66, + 0x22, 0x6d, 0x3b, 0x14, 0xc7, 0x62, 0xa4, 0x29, 0x58, 0xbf, 0x7d, 0x06, 0x96, 0x90, 0xe4, 0x2e, + 0x93, 0xe4, 0x06, 0xbe, 0x3a, 0x6a, 0x0c, 0xcf, 0xec, 0x11, 0xcf, 0x16, 0xd2, 0xac, 0xfe, 0x33, + 0x0d, 0xf9, 0x75, 0xfe, 0x13, 0x2b, 0xe4, 0x41, 0xc1, 0xef, 0x84, 0xa1, 0x85, 0xb8, 0xae, 0x44, + 0x50, 0xb2, 0xd7, 0xaf, 0x27, 0xce, 0x0b, 0x11, 0xee, 0x30, 0x11, 0x16, 0xf1, 0x15, 0x5f, 0x04, + 0xf1, 0x53, 0xae, 0x15, 0x7e, 0xf9, 0x5e, 0x31, 0xda, 0x6d, 0xba, 0x25, 0xdf, 0xd5, 0xa0, 0xa4, + 0x36, 0xac, 0xd0, 0x8d, 0xd8, 0x7e, 0x88, 0xda, 0xf3, 0xaa, 0xe3, 0x71, 0x28, 0x82, 0xff, 0x3d, + 0xc6, 0xff, 0x26, 0x5e, 0x48, 0xe2, 0xef, 0x30, 0xfc, 0xb0, 0x08, 0xbc, 0xe5, 0x14, 0x2f, 0x42, + 0xa8, 0xa3, 0x15, 0x2f, 0x42, 0xb8, 0x63, 0x75, 0xb6, 0x08, 0x03, 0x86, 0x4f, 0x45, 0x38, 0x01, + 0x08, 0x3a, 0x4c, 0x28, 0xd6, 0xb8, 0xca, 0x25, 
0x26, 0xea, 0x83, 0xa3, 0xcd, 0xa9, 0x98, 0x13, + 0x10, 0xe1, 0xdd, 0x35, 0x5d, 0xea, 0x8b, 0xab, 0xbf, 0xcd, 0x40, 0xf1, 0x89, 0x61, 0x5a, 0x1e, + 0xb1, 0x0c, 0xab, 0x45, 0x50, 0x07, 0xb2, 0x2c, 0x4b, 0x45, 0x03, 0x8f, 0xda, 0xf6, 0x89, 0x06, + 0x9e, 0x50, 0x4f, 0x04, 0xdf, 0x66, 0xac, 0xaf, 0xe3, 0xba, 0xcf, 0xba, 0x17, 0xd0, 0x5f, 0x61, + 0xfd, 0x0c, 0xaa, 0xf2, 0x31, 0xe4, 0x78, 0xff, 0x02, 0x45, 0xa8, 0x85, 0xfa, 0x1c, 0xf5, 0xab, + 0xf1, 0x93, 0x89, 0xa7, 0x4c, 0xe5, 0xe5, 0x32, 0x64, 0xca, 0xec, 0xdb, 0x00, 0x41, 0xc3, 0x2c, + 0x6a, 0xdf, 0x91, 0xfe, 0x5a, 0x7d, 0x31, 0x19, 0x41, 0x30, 0xbe, 0xcf, 0x18, 0xdf, 0xc2, 0xd7, + 0x63, 0x19, 0xb7, 0xfd, 0x05, 0x94, 0x79, 0x0b, 0x32, 0x9b, 0x86, 0x7b, 0x84, 0x22, 0x49, 0x48, + 0x79, 0xdb, 0xad, 0xd7, 0xe3, 0xa6, 0x04, 0xab, 0x5b, 0x8c, 0xd5, 0x02, 0x9e, 0x8f, 0x65, 0x75, + 0x64, 0xb8, 0x34, 0xa6, 0xa3, 0x01, 0x4c, 0xcb, 0xf7, 0x5a, 0x74, 0x2d, 0x62, 0xb3, 0xf0, 0xdb, + 0x6e, 0x7d, 0x21, 0x69, 0x5a, 0x30, 0x5c, 0x62, 0x0c, 0x31, 0xbe, 0x16, 0x6f, 0x54, 0x81, 0xfe, + 0x50, 0xbb, 0xff, 0x9a, 0xb6, 0xfa, 0xc3, 0x2a, 0x64, 0x68, 0xbd, 0x44, 0xb3, 0x48, 0x70, 0xcd, + 0x8c, 0x5a, 0x78, 0xa4, 0xb9, 0x13, 0xb5, 0xf0, 0xe8, 0x0d, 0x35, 0x26, 0x8b, 0xb0, 0x1f, 0x9a, + 0x12, 0x86, 0x45, 0x35, 0xf6, 0xa0, 0xa8, 0x5c, 0x46, 0x51, 0x0c, 0xc5, 0x70, 0xeb, 0x28, 0x9a, + 0x45, 0x62, 0x6e, 0xb2, 0x78, 0x91, 0x31, 0xad, 0xe3, 0x8b, 0x61, 0xa6, 0x6d, 0x8e, 0x46, 0xb9, + 0x7e, 0x0c, 0x25, 0xf5, 0xd6, 0x8a, 0x62, 0x88, 0x46, 0x7a, 0x53, 0xd1, 0x58, 0x11, 0x77, 0xe9, + 0x8d, 0x71, 0x1a, 0xff, 0x67, 0xb5, 0x12, 0x97, 0x72, 0xff, 0x10, 0xf2, 0xe2, 0x2e, 0x1b, 0xa7, + 0x6f, 0xb8, 0x9b, 0x15, 0xa7, 0x6f, 0xe4, 0x22, 0x1c, 0x53, 0x92, 0x30, 0xb6, 0xb4, 0x66, 0x97, + 0x01, 0x5a, 0xb0, 0x7c, 0x4c, 0xbc, 0x24, 0x96, 0x41, 0x7f, 0x26, 0x89, 0xa5, 0x72, 0x5f, 0x1a, + 0xcb, 0xb2, 0x43, 0x3c, 0x71, 0x96, 0xe5, 0x65, 0x04, 0x25, 0x50, 0x54, 0xa3, 0x21, 0x1e, 0x87, + 0x92, 0x58, 0x45, 0x06, 0x5c, 0x45, 0x28, 0x44, 0xdf, 0x01, 0x08, 0x2e, 0xde, 0xd1, 0xc2, 0x20, + 0xb6, 0x7b, 0x17, 0x2d, 0x0c, 0xe2, 0xef, 0xee, 0x31, 0x1e, 0x1c, 0x30, 0xe7, 0x95, 0x2c, 0x65, + 0xff, 0x13, 0x0d, 0xd0, 0xe8, 0x45, 0x1d, 0x3d, 0x88, 0x67, 0x11, 0xdb, 0x18, 0xac, 0xbf, 0x72, + 0x3e, 0xe4, 0xc4, 0xe8, 0x19, 0xc8, 0xd5, 0x62, 0x4b, 0xfa, 0x2f, 0xa9, 0x64, 0x9f, 0x69, 0x50, + 0x0e, 0x5d, 0xf5, 0xd1, 0x9d, 0x84, 0x7d, 0x8e, 0x34, 0x17, 0xeb, 0x77, 0xcf, 0xc4, 0x4b, 0xac, + 0x9d, 0x94, 0x53, 0x21, 0xeb, 0xc6, 0x1f, 0x68, 0x50, 0x09, 0xf7, 0x07, 0x50, 0x02, 0x83, 0x91, + 0x0e, 0x65, 0x7d, 0xe9, 0x6c, 0xc4, 0x73, 0xec, 0x56, 0x50, 0x4a, 0x7e, 0x08, 0x79, 0xd1, 0x56, + 0x88, 0x73, 0x8b, 0x70, 0x83, 0x33, 0xce, 0x2d, 0x22, 0x3d, 0x89, 0x24, 0xb7, 0xa0, 0x37, 0x74, + 0xc5, 0x13, 0x45, 0xf3, 0x21, 0x89, 0xe5, 0x78, 0x4f, 0x8c, 0x74, 0x2e, 0xc6, 0xb2, 0x0c, 0x3c, + 0x51, 0xb6, 0x1e, 0x50, 0x02, 0xc5, 0x33, 0x3c, 0x31, 0xda, 0xb9, 0x48, 0xf2, 0x44, 0xc6, 0x55, + 0xf1, 0xc4, 0xa0, 0x53, 0x10, 0xe7, 0x89, 0x23, 0xed, 0xdb, 0x38, 0x4f, 0x1c, 0x6d, 0x36, 0x24, + 0xed, 0x2d, 0x63, 0x1e, 0xf2, 0xc4, 0xd9, 0x98, 0xce, 0x02, 0x7a, 0x25, 0xc1, 0xa6, 0xb1, 0xad, + 0xe1, 0xfa, 0xab, 0xe7, 0xc4, 0x1e, 0xef, 0x01, 0x7c, 0x37, 0xa4, 0x07, 0xfc, 0x42, 0x83, 0xb9, + 0xb8, 0xd6, 0x04, 0x4a, 0x60, 0x96, 0xd0, 0x57, 0xae, 0x2f, 0x9f, 0x17, 0xfd, 0x1c, 0x76, 0xf3, + 0x7d, 0xe2, 0x51, 0xf5, 0x77, 0x5f, 0x2e, 0x68, 0x7f, 0xfc, 0x72, 0x41, 0xfb, 0xf3, 0x97, 0x0b, + 0xda, 0x4f, 0xff, 0xb2, 0x30, 0x75, 0x98, 0x63, 0xff, 0xdb, 0xe3, 0x8d, 0x7f, 0x05, 0x00, 0x00, + 0xff, 0xff, 0x63, 0x1c, 0x78, 0x24, 0x74, 0x32, 0x00, 0x00, } diff --git 
a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.gw.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.gw.go deleted file mode 100644 index 473ad582ef8e..000000000000 --- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.gw.go +++ /dev/null @@ -1,1911 +0,0 @@ -// Code generated by protoc-gen-grpc-gateway -// source: etcdserver/etcdserverpb/rpc.proto -// DO NOT EDIT! - -/* -Package etcdserverpb is a reverse proxy. - -It translates gRPC into RESTful JSON APIs. -*/ -package etcdserverpb - -import ( - "io" - "net/http" - - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "golang.org/x/net/context" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" -) - -var _ codes.Code -var _ io.Reader -var _ = runtime.String -var _ = utilities.NewDoubleArray - -func request_KV_Range_0(ctx context.Context, marshaler runtime.Marshaler, client KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq RangeRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Range(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_KV_Put_0(ctx context.Context, marshaler runtime.Marshaler, client KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq PutRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Put(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_KV_DeleteRange_0(ctx context.Context, marshaler runtime.Marshaler, client KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq DeleteRangeRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.DeleteRange(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_KV_Txn_0(ctx context.Context, marshaler runtime.Marshaler, client KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq TxnRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Txn(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_KV_Compact_0(ctx context.Context, marshaler runtime.Marshaler, client KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq CompactionRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, 
"%v", err) - } - - msg, err := client.Compact(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Watch_Watch_0(ctx context.Context, marshaler runtime.Marshaler, client WatchClient, req *http.Request, pathParams map[string]string) (Watch_WatchClient, runtime.ServerMetadata, error) { - var metadata runtime.ServerMetadata - stream, err := client.Watch(ctx) - if err != nil { - grpclog.Printf("Failed to start streaming: %v", err) - return nil, metadata, err - } - dec := marshaler.NewDecoder(req.Body) - handleSend := func() error { - var protoReq WatchRequest - err = dec.Decode(&protoReq) - if err == io.EOF { - return err - } - if err != nil { - grpclog.Printf("Failed to decode request: %v", err) - return err - } - if err = stream.Send(&protoReq); err != nil { - grpclog.Printf("Failed to send request: %v", err) - return err - } - return nil - } - if err := handleSend(); err != nil { - if cerr := stream.CloseSend(); cerr != nil { - grpclog.Printf("Failed to terminate client stream: %v", cerr) - } - if err == io.EOF { - return stream, metadata, nil - } - return nil, metadata, err - } - go func() { - for { - if err := handleSend(); err != nil { - break - } - } - if err := stream.CloseSend(); err != nil { - grpclog.Printf("Failed to terminate client stream: %v", err) - } - }() - header, err := stream.Header() - if err != nil { - grpclog.Printf("Failed to get header from client: %v", err) - return nil, metadata, err - } - metadata.HeaderMD = header - return stream, metadata, nil -} - -func request_Lease_LeaseGrant_0(ctx context.Context, marshaler runtime.Marshaler, client LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq LeaseGrantRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.LeaseGrant(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Lease_LeaseRevoke_0(ctx context.Context, marshaler runtime.Marshaler, client LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq LeaseRevokeRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.LeaseRevoke(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Lease_LeaseKeepAlive_0(ctx context.Context, marshaler runtime.Marshaler, client LeaseClient, req *http.Request, pathParams map[string]string) (Lease_LeaseKeepAliveClient, runtime.ServerMetadata, error) { - var metadata runtime.ServerMetadata - stream, err := client.LeaseKeepAlive(ctx) - if err != nil { - grpclog.Printf("Failed to start streaming: %v", err) - return nil, metadata, err - } - dec := marshaler.NewDecoder(req.Body) - handleSend := func() error { - var protoReq LeaseKeepAliveRequest - err = dec.Decode(&protoReq) - if err == io.EOF { - return err - } - if err != nil { - grpclog.Printf("Failed to decode request: %v", err) - return err - } - if err = stream.Send(&protoReq); err != nil { - grpclog.Printf("Failed to send request: %v", err) - return err - } - return nil - } - if err := 
handleSend(); err != nil { - if cerr := stream.CloseSend(); cerr != nil { - grpclog.Printf("Failed to terminate client stream: %v", cerr) - } - if err == io.EOF { - return stream, metadata, nil - } - return nil, metadata, err - } - go func() { - for { - if err := handleSend(); err != nil { - break - } - } - if err := stream.CloseSend(); err != nil { - grpclog.Printf("Failed to terminate client stream: %v", err) - } - }() - header, err := stream.Header() - if err != nil { - grpclog.Printf("Failed to get header from client: %v", err) - return nil, metadata, err - } - metadata.HeaderMD = header - return stream, metadata, nil -} - -func request_Lease_LeaseTimeToLive_0(ctx context.Context, marshaler runtime.Marshaler, client LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq LeaseTimeToLiveRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.LeaseTimeToLive(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Cluster_MemberAdd_0(ctx context.Context, marshaler runtime.Marshaler, client ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq MemberAddRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.MemberAdd(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Cluster_MemberRemove_0(ctx context.Context, marshaler runtime.Marshaler, client ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq MemberRemoveRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.MemberRemove(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Cluster_MemberUpdate_0(ctx context.Context, marshaler runtime.Marshaler, client ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq MemberUpdateRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.MemberUpdate(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Cluster_MemberList_0(ctx context.Context, marshaler runtime.Marshaler, client ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq MemberListRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.MemberList(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - 
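Every unary `request_<Service>_<Method>_0` helper in this deleted gateway file repeats the same shape: decode the HTTP JSON body into the typed protobuf request, forward it over the gRPC client, and capture response header/trailer metadata for the HTTP layer. Below is a minimal sketch of that shape; `requestUnary` is a hypothetical name (the generated file inlines this per method rather than sharing one function), and it uses only the calls visible in the deleted code above.

```go
package gatewaysketch

import (
	"net/http"

	"github.com/golang/protobuf/proto"
	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
)

// requestUnary is a hypothetical distillation of the generated
// request_<Service>_<Method>_0 helpers: decode the HTTP body, call the
// gRPC method, and surface response metadata back to the HTTP handler.
func requestUnary(
	ctx context.Context,
	marshaler runtime.Marshaler,
	req *http.Request,
	protoReq proto.Message,
	call func(ctx context.Context, opts ...grpc.CallOption) (proto.Message, error),
) (proto.Message, runtime.ServerMetadata, error) {
	var metadata runtime.ServerMetadata

	// A malformed JSON body is reported as an InvalidArgument gRPC status.
	if err := marshaler.NewDecoder(req.Body).Decode(protoReq); err != nil {
		return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err)
	}

	// The Header/Trailer call options capture response metadata so the
	// HTTP handler can translate it into HTTP response headers.
	msg, err := call(ctx, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
	return msg, metadata, err
}
```

The streaming methods (Watch, LeaseKeepAlive) deviate from this only in plumbing: as the surrounding deletions show, a `handleSend` closure decodes requests from the body and pumps them into `stream.Send` in a goroutine, closing the send side when the decoder hits EOF.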
-func request_Maintenance_Alarm_0(ctx context.Context, marshaler runtime.Marshaler, client MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AlarmRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Alarm(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Maintenance_Status_0(ctx context.Context, marshaler runtime.Marshaler, client MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq StatusRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Status(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Maintenance_Defragment_0(ctx context.Context, marshaler runtime.Marshaler, client MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq DefragmentRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Defragment(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Maintenance_Hash_0(ctx context.Context, marshaler runtime.Marshaler, client MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq HashRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Hash(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Maintenance_Snapshot_0(ctx context.Context, marshaler runtime.Marshaler, client MaintenanceClient, req *http.Request, pathParams map[string]string) (Maintenance_SnapshotClient, runtime.ServerMetadata, error) { - var protoReq SnapshotRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - stream, err := client.Snapshot(ctx, &protoReq) - if err != nil { - return nil, metadata, err - } - header, err := stream.Header() - if err != nil { - return nil, metadata, err - } - metadata.HeaderMD = header - return stream, metadata, nil - -} - -func request_Auth_AuthEnable_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthEnableRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.AuthEnable(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, 
metadata, err - -} - -func request_Auth_AuthDisable_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthDisableRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.AuthDisable(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Auth_Authenticate_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthenticateRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Authenticate(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Auth_UserAdd_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthUserAddRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.UserAdd(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Auth_UserGet_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthUserGetRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.UserGet(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Auth_UserList_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthUserListRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.UserList(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Auth_UserDelete_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthUserDeleteRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.UserDelete(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Auth_UserChangePassword_0(ctx context.Context, marshaler 
runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthUserChangePasswordRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.UserChangePassword(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Auth_UserGrantRole_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthUserGrantRoleRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.UserGrantRole(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Auth_UserRevokeRole_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthUserRevokeRoleRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.UserRevokeRole(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Auth_RoleAdd_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthRoleAddRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.RoleAdd(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Auth_RoleGet_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthRoleGetRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.RoleGet(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Auth_RoleList_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthRoleListRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.RoleList(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Auth_RoleDelete_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams 
map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthRoleDeleteRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.RoleDelete(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Auth_RoleGrantPermission_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthRoleGrantPermissionRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.RoleGrantPermission(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func request_Auth_RoleRevokePermission_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq AuthRoleRevokePermissionRequest - var metadata runtime.ServerMetadata - - if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil { - return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.RoleRevokePermission(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -// RegisterKVHandlerFromEndpoint is same as RegisterKVHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterKVHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) - if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterKVHandler(ctx, mux, conn) -} - -// RegisterKVHandler registers the http handlers for service KV to "mux". -// The handlers forward requests to the grpc endpoint over "conn". 
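Every removed `request_<Service>_<Method>_0` translator above follows one template that grpc-gateway stamps out per RPC: decode the JSON request body into the method's request proto, call the gRPC client stub, and capture header/trailer metadata for the gateway. A distilled sketch of that template — `FooRequest`/`FooClient` are placeholder names for illustration, not symbols from this file; the `RegisterKVHandler` body that follows wires such translators to URL patterns on the mux:

```go
package gwsketch

import (
	"net/http"

	"github.com/golang/protobuf/proto"
	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
)

// FooRequest and FooClient stand in for a generated request proto and gRPC
// client stub; the real generated stubs return concrete response types.
type FooRequest struct{}

type FooClient interface {
	Foo(ctx context.Context, in *FooRequest, opts ...grpc.CallOption) (proto.Message, error)
}

// requestFoo distills the template behind each request_<Service>_<Method>_0
// translator: decode the body, invoke the stub, capture response metadata.
func requestFoo(ctx context.Context, marshaler runtime.Marshaler, client FooClient, req *http.Request) (proto.Message, runtime.ServerMetadata, error) {
	var protoReq FooRequest
	var metadata runtime.ServerMetadata

	// A malformed body surfaces as gRPC InvalidArgument, which the gateway
	// later translates back into an HTTP error status.
	if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
		return nil, metadata, grpc.Errorf(codes.InvalidArgument, "%v", err)
	}

	msg, err := client.Foo(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
	return msg, metadata, err
}
```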
-func RegisterKVHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - client := NewKVClient(conn) - - mux.Handle("POST", pattern_KV_Range_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_KV_Range_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_KV_Range_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_KV_Put_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_KV_Put_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_KV_Put_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_KV_DeleteRange_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_KV_DeleteRange_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_KV_DeleteRange_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_KV_Txn_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_KV_Txn_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_KV_Txn_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_KV_Compact_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_KV_Compact_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_KV_Compact_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -var ( - pattern_KV_Range_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "kv", "range"}, "")) - - pattern_KV_Put_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "kv", "put"}, "")) - - pattern_KV_DeleteRange_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "kv", "deleterange"}, "")) - - pattern_KV_Txn_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "kv", "txn"}, "")) - - pattern_KV_Compact_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "kv", "compaction"}, "")) -) - -var ( - forward_KV_Range_0 = runtime.ForwardResponseMessage - - forward_KV_Put_0 = runtime.ForwardResponseMessage - - forward_KV_DeleteRange_0 = runtime.ForwardResponseMessage - - forward_KV_Txn_0 = runtime.ForwardResponseMessage - - forward_KV_Compact_0 = runtime.ForwardResponseMessage -) - -// RegisterWatchHandlerFromEndpoint is same as RegisterWatchHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterWatchHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) 
- if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterWatchHandler(ctx, mux, conn) -} - -// RegisterWatchHandler registers the http handlers for service Watch to "mux". -// The handlers forward requests to the grpc endpoint over "conn". -func RegisterWatchHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - client := NewWatchClient(conn) - - mux.Handle("POST", pattern_Watch_Watch_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_Watch_Watch_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_Watch_Watch_0(ctx, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -var ( - pattern_Watch_Watch_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v3alpha", "watch"}, "")) -) - -var ( - forward_Watch_Watch_0 = runtime.ForwardResponseStream -) - -// RegisterLeaseHandlerFromEndpoint is same as RegisterLeaseHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterLeaseHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) - if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterLeaseHandler(ctx, mux, conn) -} - -// RegisterLeaseHandler registers the http handlers for service Lease to "mux". -// The handlers forward requests to the grpc endpoint over "conn". 
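Each `mux.Handle` closure above opens with the same cancellation preamble. A minimal standalone rendering of that idiom (the helper name is ours; `http.CloseNotifier` was how code of this vintage observed client disconnects before Go 1.8's `req.Context()` superseded it):

```go
package gwsketch

import (
	"net/http"

	"golang.org/x/net/context"
)

// withCancelOnDisconnect reproduces the preamble each generated handler
// repeats: derive a per-request context and cancel it when the client goes
// away, so the proxied gRPC call does not outlive its HTTP request.
func withCancelOnDisconnect(ctx context.Context, w http.ResponseWriter) (context.Context, context.CancelFunc) {
	ctx, cancel := context.WithCancel(ctx)
	if cn, ok := w.(http.CloseNotifier); ok {
		go func(done <-chan struct{}, closed <-chan bool) {
			select {
			case <-done: // handler finished first
			case <-closed: // client disconnected mid-request
				cancel()
			}
		}(ctx.Done(), cn.CloseNotify())
	}
	return ctx, cancel
}
```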
-func RegisterLeaseHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - client := NewLeaseClient(conn) - - mux.Handle("POST", pattern_Lease_LeaseGrant_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_Lease_LeaseGrant_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_Lease_LeaseGrant_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Lease_LeaseRevoke_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_Lease_LeaseRevoke_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_Lease_LeaseRevoke_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Lease_LeaseKeepAlive_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_Lease_LeaseKeepAlive_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_Lease_LeaseKeepAlive_0(ctx, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_Lease_LeaseTimeToLive_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_Lease_LeaseTimeToLive_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_Lease_LeaseTimeToLive_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -var ( - pattern_Lease_LeaseGrant_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "lease", "grant"}, "")) - - pattern_Lease_LeaseRevoke_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "kv", "lease", "revoke"}, "")) - - pattern_Lease_LeaseKeepAlive_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "lease", "keepalive"}, "")) - - pattern_Lease_LeaseTimeToLive_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "kv", "lease", "timetolive"}, "")) -) - -var ( - forward_Lease_LeaseGrant_0 = runtime.ForwardResponseMessage - - forward_Lease_LeaseRevoke_0 = runtime.ForwardResponseMessage - - forward_Lease_LeaseKeepAlive_0 = runtime.ForwardResponseStream - - forward_Lease_LeaseTimeToLive_0 = runtime.ForwardResponseMessage -) - -// RegisterClusterHandlerFromEndpoint is same as RegisterClusterHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterClusterHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) - if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterClusterHandler(ctx, mux, conn) -} - -// RegisterClusterHandler registers the http handlers for service Cluster to "mux". -// The handlers forward requests to the grpc endpoint over "conn". 
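The `pattern_*` values above compile each method's HTTP rule. Our reading of the opcode slices — an assumption about grpc-gateway's httprule compiler, not something this file states — is that each `(2, i)` pair pushes pool entry `i` as a literal path segment; note the file binds LeaseRevoke and LeaseTimeToLive under `/v3alpha/kv/lease/...`, unlike LeaseGrant and LeaseKeepAlive under `/v3alpha/lease/...`. A small sketch exercising that reading:

```go
package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
)

func main() {
	// Same construction as pattern_Lease_LeaseRevoke_0 above; if each (2, i)
	// pair is a literal push, this matches POST /v3alpha/kv/lease/revoke.
	p := runtime.MustPattern(runtime.NewPattern(1,
		[]int{2, 0, 2, 1, 2, 2, 2, 3},
		[]string{"v3alpha", "kv", "lease", "revoke"}, ""))

	if _, err := p.Match([]string{"v3alpha", "kv", "lease", "revoke"}, ""); err == nil {
		fmt.Println("matched /v3alpha/kv/lease/revoke")
	}
}
```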
-func RegisterClusterHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - client := NewClusterClient(conn) - - mux.Handle("POST", pattern_Cluster_MemberAdd_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_Cluster_MemberAdd_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_Cluster_MemberAdd_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Cluster_MemberRemove_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_Cluster_MemberRemove_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_Cluster_MemberRemove_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Cluster_MemberUpdate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_Cluster_MemberUpdate_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_Cluster_MemberUpdate_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_Cluster_MemberList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_Cluster_MemberList_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_Cluster_MemberList_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -var ( - pattern_Cluster_MemberAdd_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "cluster", "member", "add"}, "")) - - pattern_Cluster_MemberRemove_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "cluster", "member", "remove"}, "")) - - pattern_Cluster_MemberUpdate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "cluster", "member", "update"}, "")) - - pattern_Cluster_MemberList_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "cluster", "member", "list"}, "")) -) - -var ( - forward_Cluster_MemberAdd_0 = runtime.ForwardResponseMessage - - forward_Cluster_MemberRemove_0 = runtime.ForwardResponseMessage - - forward_Cluster_MemberUpdate_0 = runtime.ForwardResponseMessage - - forward_Cluster_MemberList_0 = runtime.ForwardResponseMessage -) - -// RegisterMaintenanceHandlerFromEndpoint is same as RegisterMaintenanceHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterMaintenanceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) - if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterMaintenanceHandler(ctx, mux, conn) -} - -// RegisterMaintenanceHandler registers the http handlers for service Maintenance to "mux". -// The handlers forward requests to the grpc endpoint over "conn". 
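Most `forward_*` bindings above are `runtime.ForwardResponseMessage`, one proto per request; the server-streaming RPCs — Watch earlier, LeaseKeepAlive above, Maintenance/Snapshot below — bind `runtime.ForwardResponseStream` and wrap the client stream in a receive callback. A stand-in sketch of that adaptation (real generated streams return concrete types such as `*WatchResponse` from `Recv`):

```go
package gwsketch

import "github.com/golang/protobuf/proto"

// messageStream is a stand-in for a generated client stream.
type messageStream interface {
	Recv() (proto.Message, error)
}

// recvCallback shows the shape the streaming handlers hand to
// runtime.ForwardResponseStream: a callback the gateway invokes repeatedly,
// flushing each message to the HTTP response, until it yields io.EOF.
func recvCallback(stream messageStream) func() (proto.Message, error) {
	return func() (proto.Message, error) { return stream.Recv() }
}
```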
-func RegisterMaintenanceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - client := NewMaintenanceClient(conn) - - mux.Handle("POST", pattern_Maintenance_Alarm_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_Maintenance_Alarm_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_Maintenance_Alarm_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Maintenance_Status_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_Maintenance_Status_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_Maintenance_Status_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Maintenance_Defragment_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_Maintenance_Defragment_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_Maintenance_Defragment_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_Maintenance_Hash_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_Maintenance_Hash_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_Maintenance_Hash_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Maintenance_Snapshot_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_Maintenance_Snapshot_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_Maintenance_Snapshot_0(ctx, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -var ( - pattern_Maintenance_Alarm_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "maintenance", "alarm"}, "")) - - pattern_Maintenance_Status_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "maintenance", "status"}, "")) - - pattern_Maintenance_Defragment_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "maintenance", "defragment"}, "")) - - pattern_Maintenance_Hash_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "maintenance", "hash"}, "")) - - pattern_Maintenance_Snapshot_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "maintenance", "snapshot"}, "")) -) - -var ( - forward_Maintenance_Alarm_0 = runtime.ForwardResponseMessage - - forward_Maintenance_Status_0 = runtime.ForwardResponseMessage - - forward_Maintenance_Defragment_0 = runtime.ForwardResponseMessage - - forward_Maintenance_Hash_0 = runtime.ForwardResponseMessage - - forward_Maintenance_Snapshot_0 = runtime.ForwardResponseStream -) - -// RegisterAuthHandlerFromEndpoint is same as RegisterAuthHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterAuthHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) 
- if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterAuthHandler(ctx, mux, conn) -} - -// RegisterAuthHandler registers the http handlers for service Auth to "mux". -// The handlers forward requests to the grpc endpoint over "conn". -func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - client := NewAuthClient(conn) - - mux.Handle("POST", pattern_Auth_AuthEnable_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_Auth_AuthEnable_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_Auth_AuthEnable_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Auth_AuthDisable_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_Auth_AuthDisable_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_Auth_AuthDisable_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Auth_Authenticate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_Auth_Authenticate_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_Auth_Authenticate_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_Auth_UserAdd_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_Auth_UserAdd_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_Auth_UserAdd_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Auth_UserGet_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_Auth_UserGet_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_Auth_UserGet_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Auth_UserList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_Auth_UserList_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_Auth_UserList_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_Auth_UserDelete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_Auth_UserDelete_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_Auth_UserDelete_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Auth_UserChangePassword_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_Auth_UserChangePassword_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_Auth_UserChangePassword_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Auth_UserGrantRole_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_Auth_UserGrantRole_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_Auth_UserGrantRole_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_Auth_UserRevokeRole_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_Auth_UserRevokeRole_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_Auth_UserRevokeRole_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Auth_RoleAdd_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_Auth_RoleAdd_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_Auth_RoleAdd_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Auth_RoleGet_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_Auth_RoleGet_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_Auth_RoleGet_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_Auth_RoleList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_Auth_RoleList_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_Auth_RoleList_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Auth_RoleDelete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_Auth_RoleDelete_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_Auth_RoleDelete_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("POST", pattern_Auth_RoleGrantPermission_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_Auth_RoleGrantPermission_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_Auth_RoleGrantPermission_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("POST", pattern_Auth_RoleRevokePermission_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - if cn, ok := w.(http.CloseNotifier); ok { - go func(done <-chan struct{}, closed <-chan bool) { - select { - case <-done: - case <-closed: - cancel() - } - }(ctx.Done(), cn.CloseNotify()) - } - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, req) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - } - resp, md, err := request_Auth_RoleRevokePermission_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, outboundMarshaler, w, req, err) - return - } - - forward_Auth_RoleRevokePermission_0(ctx, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -var ( - pattern_Auth_AuthEnable_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "auth", "enable"}, "")) - - pattern_Auth_AuthDisable_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "auth", "disable"}, "")) - - pattern_Auth_Authenticate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3alpha", "auth", "authenticate"}, "")) - - pattern_Auth_UserAdd_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "auth", "user", "add"}, "")) - - pattern_Auth_UserGet_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "auth", "user", "get"}, "")) - - pattern_Auth_UserList_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "auth", "user", "list"}, "")) - - pattern_Auth_UserDelete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "auth", "user", "delete"}, "")) - - pattern_Auth_UserChangePassword_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "auth", "user", "changepw"}, "")) - - pattern_Auth_UserGrantRole_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "auth", "user", "grant"}, "")) - - pattern_Auth_UserRevokeRole_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "auth", "user", "revoke"}, "")) - - pattern_Auth_RoleAdd_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "auth", "role", "add"}, "")) - - pattern_Auth_RoleGet_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "auth", "role", "get"}, "")) - - pattern_Auth_RoleList_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "auth", "role", "list"}, "")) - - pattern_Auth_RoleDelete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "auth", "role", "delete"}, "")) - - pattern_Auth_RoleGrantPermission_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "auth", "role", "grant"}, "")) - - pattern_Auth_RoleRevokePermission_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3alpha", "auth", "role", "revoke"}, "")) -) - -var ( - forward_Auth_AuthEnable_0 = runtime.ForwardResponseMessage - - 
forward_Auth_AuthDisable_0 = runtime.ForwardResponseMessage - - forward_Auth_Authenticate_0 = runtime.ForwardResponseMessage - - forward_Auth_UserAdd_0 = runtime.ForwardResponseMessage - - forward_Auth_UserGet_0 = runtime.ForwardResponseMessage - - forward_Auth_UserList_0 = runtime.ForwardResponseMessage - - forward_Auth_UserDelete_0 = runtime.ForwardResponseMessage - - forward_Auth_UserChangePassword_0 = runtime.ForwardResponseMessage - - forward_Auth_UserGrantRole_0 = runtime.ForwardResponseMessage - - forward_Auth_UserRevokeRole_0 = runtime.ForwardResponseMessage - - forward_Auth_RoleAdd_0 = runtime.ForwardResponseMessage - - forward_Auth_RoleGet_0 = runtime.ForwardResponseMessage - - forward_Auth_RoleList_0 = runtime.ForwardResponseMessage - - forward_Auth_RoleDelete_0 = runtime.ForwardResponseMessage - - forward_Auth_RoleGrantPermission_0 = runtime.ForwardResponseMessage - - forward_Auth_RoleRevokePermission_0 = runtime.ForwardResponseMessage -) diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto index ddf1ad233299..a6cd00ab7c3e 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto +++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto @@ -352,11 +352,12 @@ message RangeRequest { bytes key = 1; // range_end is the upper bound on the requested range [key, range_end). // If range_end is '\0', the range is all keys >= key. - // If the range_end is one bit larger than the given key, - // then the range requests get the all keys with the prefix (the given key). - // If both key and range_end are '\0', then range requests returns all keys. + // If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"), + // then the range request gets all keys prefixed with key. + // If both key and range_end are '\0', then the range request returns all keys. bytes range_end = 2; - // limit is a limit on the number of keys returned for the request. + // limit is a limit on the number of keys returned for the request. When limit is set to 0, + // it is treated as no limit. int64 limit = 3; // revision is the point-in-time of the key-value store to use for the range. // If revision is less or equal to zero, the range is over the newest key-value store. @@ -423,6 +424,14 @@ message PutRequest { // If prev_kv is set, etcd gets the previous key-value pair before changing it. // The previous key-value pair will be returned in the put response. bool prev_kv = 4; + + // If ignore_value is set, etcd updates the key using its current value. + // Returns an error if the key does not exist. + bool ignore_value = 5; + + // If ignore_lease is set, etcd updates the key using its current lease. + // Returns an error if the key does not exist. + bool ignore_lease = 6; } message PutResponse { @@ -436,13 +445,13 @@ message DeleteRangeRequest { bytes key = 1; // range_end is the key following the last key to delete for the range [key, range_end). // If range_end is not given, the range is defined to contain only the key argument. - // If range_end is one bit larger than the given key, then the range is all - // the all keys with the prefix (the given key). + // If range_end is one bit larger than the given key, then the range is all the keys + // with the prefix (the given key). // If range_end is '\0', the range is all keys greater than or equal to the key argument. bytes range_end = 2; // If prev_kv is set, etcd gets the previous key-value pairs before deleting it. 
- // The previous key-value pairs will be returned in the delte response. + // The previous key-value pairs will be returned in the delete response. bool prev_kv = 3; } @@ -645,6 +654,9 @@ message WatchResponse { // watcher with the same start_revision again. int64 compact_revision = 5; + // cancel_reason indicates the reason for canceling the watcher. + string cancel_reason = 6; + repeated mvccpb.Event events = 11; } @@ -725,6 +737,8 @@ message MemberAddResponse { ResponseHeader header = 1; // member is the member information for the added member. Member member = 2; + // members is a list of all members after adding the new member. + repeated Member members = 3; } message MemberRemoveRequest { @@ -734,6 +748,8 @@ message MemberRemoveRequest { message MemberRemoveResponse { ResponseHeader header = 1; + // members is a list of all members after removing the member. + repeated Member members = 2; } message MemberUpdateRequest { @@ -745,6 +761,8 @@ message MemberUpdateRequest { message MemberUpdateResponse{ ResponseHeader header = 1; + // members is a list of all members after updating the member. + repeated Member members = 2; } message MemberListRequest { diff --git a/vendor/github.com/coreos/etcd/etcdserver/membership/cluster.go b/vendor/github.com/coreos/etcd/etcdserver/membership/cluster.go index 25c45dfce126..2330219f18ae 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/membership/cluster.go +++ b/vendor/github.com/coreos/etcd/etcdserver/membership/cluster.go @@ -178,7 +178,7 @@ func (c *RaftCluster) String() string { fmt.Fprintf(b, "Members:[%s] ", strings.Join(ms, " ")) var ids []string for id := range c.removed { - ids = append(ids, fmt.Sprintf("%s", id)) + ids = append(ids, id.String()) } fmt.Fprintf(b, "RemovedMemberIDs:[%s]}", strings.Join(ids, " ")) return b.String() diff --git a/vendor/github.com/coreos/etcd/etcdserver/membership/doc.go b/vendor/github.com/coreos/etcd/etcdserver/membership/doc.go new file mode 100644 index 000000000000..b07fb2d92859 --- /dev/null +++ b/vendor/github.com/coreos/etcd/etcdserver/membership/doc.go @@ -0,0 +1,16 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package membership describes individual etcd members and clusters of members. 
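The reworded `range_end` comments in the RangeRequest and DeleteRangeRequest hunks above describe prefix ranges as "key plus one". A standalone sketch of that computation, shown only to make the comment concrete (it mirrors what etcd's clientv3 does for prefix queries):

```go
package etcdsketch

// prefixRangeEnd returns the range_end for which [key, range_end) covers every
// key beginning with key: increment the last byte below 0xff and drop anything
// after it, e.g. "aa" -> "ab" and "a\xff" -> "b".
func prefixRangeEnd(key []byte) []byte {
	end := append([]byte(nil), key...)
	for i := len(end) - 1; i >= 0; i-- {
		if end[i] < 0xff {
			end[i]++
			return end[:i+1]
		}
	}
	// Every byte is 0xff, so no finite upper bound exists; per the proto
	// comment, range_end = "\x00" then means "all keys >= key".
	return []byte{0}
}
```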
+package membership diff --git a/vendor/github.com/coreos/etcd/etcdserver/membership/store.go b/vendor/github.com/coreos/etcd/etcdserver/membership/store.go index f2ea0120d74b..d3f8f2474a42 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/membership/store.go +++ b/vendor/github.com/coreos/etcd/etcdserver/membership/store.go @@ -36,7 +36,7 @@ const ( var ( membersBucketName = []byte("members") - membersRemovedBuckedName = []byte("members_removed") + membersRemovedBucketName = []byte("members_removed") clusterBucketName = []byte("cluster") StoreMembersPrefix = path.Join(storePrefix, "members") @@ -62,7 +62,7 @@ func mustDeleteMemberFromBackend(be backend.Backend, id types.ID) { tx := be.BatchTx() tx.Lock() tx.UnsafeDelete(membersBucketName, mkey) - tx.UnsafePut(membersRemovedBuckedName, mkey, []byte("removed")) + tx.UnsafePut(membersRemovedBucketName, mkey, []byte("removed")) tx.Unlock() } @@ -164,7 +164,7 @@ func mustCreateBackendBuckets(be backend.Backend) { tx.Lock() defer tx.Unlock() tx.UnsafeCreateBucket(membersBucketName) - tx.UnsafeCreateBucket(membersRemovedBuckedName) + tx.UnsafeCreateBucket(membersRemovedBucketName) tx.UnsafeCreateBucket(clusterBucketName) } diff --git a/vendor/github.com/coreos/etcd/etcdserver/metrics.go b/vendor/github.com/coreos/etcd/etcdserver/metrics.go index 2b549f738f7b..90bbd3632a6e 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/metrics.go +++ b/vendor/github.com/coreos/etcd/etcdserver/metrics.go @@ -58,6 +58,12 @@ var ( Name: "proposals_failed_total", Help: "The total number of failed proposals seen.", }) + leaseExpired = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: "etcd_debugging", + Subsystem: "server", + Name: "lease_expired_total", + Help: "The total number of expired leases.", + }) ) func init() { @@ -67,6 +73,7 @@ func init() { prometheus.MustRegister(proposalsApplied) prometheus.MustRegister(proposalsPending) prometheus.MustRegister(proposalsFailed) + prometheus.MustRegister(leaseExpired) } func monitorFileDescriptor(done <-chan struct{}) { diff --git a/vendor/github.com/coreos/etcd/etcdserver/quota.go b/vendor/github.com/coreos/etcd/etcdserver/quota.go index 088a4696253d..87126f1564c1 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/quota.go +++ b/vendor/github.com/coreos/etcd/etcdserver/quota.go @@ -16,7 +16,15 @@ package etcdserver import ( pb "github.com/coreos/etcd/etcdserver/etcdserverpb" - "github.com/coreos/etcd/mvcc/backend" +) + +const ( + // DefaultQuotaBytes is the number of bytes the backend Size may + // consume before exceeding the space quota. + DefaultQuotaBytes = int64(2 * 1024 * 1024 * 1024) // 2GB + // MaxQuotaBytes is the maximum number of bytes suggested for a backend + // quota. A larger quota may lead to degraded performance. + MaxQuotaBytes = int64(8 * 1024 * 1024 * 1024) // 8GB ) // Quota represents an arbitrary quota against arbitrary requests. 
Each request @@ -57,11 +65,10 @@ func NewBackendQuota(s *EtcdServer) Quota { } if s.Cfg.QuotaBackendBytes == 0 { // use default size if no quota size given - return &backendQuota{s, backend.DefaultQuotaBytes} + return &backendQuota{s, DefaultQuotaBytes} } - if s.Cfg.QuotaBackendBytes > backend.MaxQuotaBytes { - plog.Warningf("backend quota %v exceeds maximum quota %v; using maximum", s.Cfg.QuotaBackendBytes, backend.MaxQuotaBytes) - return &backendQuota{s, backend.MaxQuotaBytes} + if s.Cfg.QuotaBackendBytes > MaxQuotaBytes { + plog.Warningf("backend quota %v exceeds maximum recommended quota %v", s.Cfg.QuotaBackendBytes, MaxQuotaBytes) } return &backendQuota{s, s.Cfg.QuotaBackendBytes} } diff --git a/vendor/github.com/coreos/etcd/etcdserver/raft.go b/vendor/github.com/coreos/etcd/etcdserver/raft.go index d7ec176eb3a7..dcb894f82fb1 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/raft.go +++ b/vendor/github.com/coreos/etcd/etcdserver/raft.go @@ -83,7 +83,8 @@ type RaftTimer interface { type apply struct { entries []raftpb.Entry snapshot raftpb.Snapshot - raftDone <-chan struct{} // rx {} after raft has persisted messages + // notifyc synchronizes etcd server applies with the raft node + notifyc chan struct{} } type raftNode struct { @@ -94,14 +95,7 @@ type raftNode struct { term uint64 lead uint64 - mu sync.Mutex - // last lead elected time - lt time.Time - - // to check if msg receiver is removed from cluster - isIDRemoved func(id uint64) bool - - raft.Node + raftNodeConfig // a chan to send/receive snapshot msgSnapC chan raftpb.Message @@ -113,28 +107,51 @@ type raftNode struct { readStateC chan raft.ReadState // utility - ticker <-chan time.Time + ticker *time.Ticker // contention detectors for raft heartbeat message - td *contention.TimeoutDetector - heartbeat time.Duration // for logging + td *contention.TimeoutDetector + + stopped chan struct{} + done chan struct{} +} + +type raftNodeConfig struct { + // to check if msg receiver is removed from cluster + isIDRemoved func(id uint64) bool + raft.Node raftStorage *raft.MemoryStorage storage Storage + heartbeat time.Duration // for logging // transport specifies the transport to send and receive msgs to members. // Sending messages MUST NOT block. It is okay to drop messages, since // clients should timeout and reissue their messages. // If transport is nil, server will panic. transport rafthttp.Transporter +} - stopped chan struct{} - done chan struct{} +func newRaftNode(cfg raftNodeConfig) *raftNode { + r := &raftNode{ + raftNodeConfig: cfg, + // set up contention detectors for raft heartbeat message. + // expect to send a heartbeat within 2 heartbeat intervals. + td: contention.NewTimeoutDetector(2 * cfg.heartbeat), + readStateC: make(chan raft.ReadState, 1), + msgSnapC: make(chan raftpb.Message, maxInFlightMsgSnap), + applyc: make(chan apply), + stopped: make(chan struct{}), + done: make(chan struct{}), + } + if r.heartbeat == 0 { + r.ticker = &time.Ticker{} + } else { + r.ticker = time.NewTicker(r.heartbeat) + } + return r } // start prepares and starts raftNode in a new goroutine. It is no longer safe // to modify the fields after it has been started. 
func (r *raftNode) start(rh *raftReadyHandler) { - r.applyc = make(chan apply) - r.stopped = make(chan struct{}) - r.done = make(chan struct{}) internalTimeout := time.Second go func() { @@ -143,14 +160,12 @@ func (r *raftNode) start(rh *raftReadyHandler) { for { select { - case <-r.ticker: + case <-r.ticker.C: r.Tick() case rd := <-r.Ready(): if rd.SoftState != nil { - if lead := atomic.LoadUint64(&r.lead); rd.SoftState.Lead != raft.None && lead != rd.SoftState.Lead { - r.mu.Lock() - r.lt = time.Now() - r.mu.Unlock() + newLeader := rd.SoftState.Lead != raft.None && atomic.LoadUint64(&r.lead) != rd.SoftState.Lead + if newLeader { leaderChanges.Inc() } @@ -162,7 +177,8 @@ func (r *raftNode) start(rh *raftReadyHandler) { atomic.StoreUint64(&r.lead, rd.SoftState.Lead) islead = rd.RaftState == raft.StateLeader - rh.updateLeadership() + rh.updateLeadership(newLeader) + r.td.Reset() } if len(rd.ReadStates) != 0 { @@ -175,11 +191,11 @@ func (r *raftNode) start(rh *raftReadyHandler) { } } - raftDone := make(chan struct{}, 1) + notifyc := make(chan struct{}, 1) ap := apply{ entries: rd.CommittedEntries, snapshot: rd.Snapshot, - raftDone: raftDone, + notifyc: notifyc, } updateCommittedIndex(&ap, rh) @@ -195,7 +211,7 @@ func (r *raftNode) start(rh *raftReadyHandler) { // For more details, check raft thesis 10.2.1 if islead { // gofail: var raftBeforeLeaderSend struct{} - r.sendMessages(rd.Messages) + r.transport.Send(r.processMessages(rd.Messages)) } // gofail: var raftBeforeSave struct{} @@ -212,6 +228,9 @@ func (r *raftNode) start(rh *raftReadyHandler) { if err := r.storage.SaveSnap(rd.Snapshot); err != nil { plog.Fatalf("raft save snapshot error: %v", err) } + // etcdserver now claims the snapshot has been persisted onto the disk + notifyc <- struct{}{} + // gofail: var raftAfterSaveSnap struct{} r.raftStorage.ApplySnapshot(rd.Snapshot) plog.Infof("raft applied incoming snapshot at index %d", rd.Snapshot.Metadata.Index) @@ -221,10 +240,44 @@ func (r *raftNode) start(rh *raftReadyHandler) { r.raftStorage.Append(rd.Entries) if !islead { + // finish processing incoming messages before we signal raftdone chan + msgs := r.processMessages(rd.Messages) + + // now unblocks 'applyAll' that waits on Raft log disk writes before triggering snapshots + notifyc <- struct{}{} + + // Candidate or follower needs to wait for all pending configuration + // changes to be applied before sending messages. + // Otherwise we might incorrectly count votes (e.g. votes from removed members). + // Also a slow machine's follower raft-layer could proceed to become the leader + // on its own single-node cluster, before apply-layer applies the config change. + // We simply wait for ALL pending entries to be applied for now. + // We might improve this later on if it causes unnecessary long blocking issues.
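The capacity-1 notifyc channel introduced above carries two kinds of signals from the raft goroutine to the apply path: a non-blocking "log writes are on disk" notification, and a blocking send that holds a candidate or follower back until the apply side has caught up with pending config changes. A minimal model of that handshake (illustrative only, not etcd's exact sequencing):

```go
package main

import "fmt"

func main() {
	notifyc := make(chan struct{}, 1) // cap 1, as the comment above assumes
	done := make(chan struct{})

	go func() { // stands in for the raftNode goroutine
		notifyc <- struct{}{} // buffered send: log entries persisted, never blocks
		fmt.Println("raft: log writes done")
		notifyc <- struct{}{} // blocks until the apply side receives below
		fmt.Println("raft: apply side caught up, safe to send messages")
		close(done)
	}()

	<-notifyc // stands in for applyAll's <-apply.notifyc
	fmt.Println("apply: raft disk writes finished, applying entries")
	<-done
	<-notifyc // drain the config-change token
}
```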
+ waitApply := false + for _, ent := range rd.CommittedEntries { + if ent.Type == raftpb.EntryConfChange { + waitApply = true + break + } + } + if waitApply { + // blocks until 'applyAll' calls 'applyWait.Trigger' + // to be in sync with scheduled config-change job + // (assume notifyc has cap of 1) + select { + case notifyc <- struct{}{}: + case <-r.stopped: + return + } + } + // gofail: var raftBeforeFollowerSend struct{} - r.sendMessages(rd.Messages) + r.transport.Send(msgs) + } else { + // leader already processed 'MsgSnap' and signaled + notifyc <- struct{}{} } - raftDone <- struct{}{} + r.Advance() case <-r.stopped: return @@ -246,7 +299,7 @@ func updateCommittedIndex(ap *apply, rh *raftReadyHandler) { } } -func (r *raftNode) sendMessages(ms []raftpb.Message) { +func (r *raftNode) processMessages(ms []raftpb.Message) []raftpb.Message { sentAppResp := false for i := len(ms) - 1; i >= 0; i-- { if r.isIDRemoved(ms[i].To) { @@ -282,20 +335,13 @@ func (r *raftNode) sendMessages(ms []raftpb.Message) { } } } - - r.transport.Send(ms) + return ms } func (r *raftNode) apply() chan apply { return r.applyc } -func (r *raftNode) leadElectedTime() time.Time { - r.mu.Lock() - defer r.mu.Unlock() - return r.lt -} - func (r *raftNode) stop() { r.stopped <- struct{}{} <-r.done @@ -303,6 +349,7 @@ func (r *raftNode) stop() { func (r *raftNode) onStop() { r.Stop() + r.ticker.Stop() r.transport.Stop() if err := r.storage.Close(); err != nil { plog.Panicf("raft close storage error: %v", err) diff --git a/vendor/github.com/coreos/etcd/etcdserver/server.go b/vendor/github.com/coreos/etcd/etcdserver/server.go index 98eb2cc7b29b..271c5e773137 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/server.go +++ b/vendor/github.com/coreos/etcd/etcdserver/server.go @@ -23,7 +23,6 @@ import ( "net/http" "os" "path" - "path/filepath" "regexp" "sync" "sync/atomic" @@ -41,7 +40,6 @@ import ( "github.com/coreos/etcd/lease" "github.com/coreos/etcd/mvcc" "github.com/coreos/etcd/mvcc/backend" - "github.com/coreos/etcd/pkg/contention" "github.com/coreos/etcd/pkg/fileutil" "github.com/coreos/etcd/pkg/idutil" "github.com/coreos/etcd/pkg/pbutil" @@ -62,7 +60,7 @@ import ( ) const ( - DefaultSnapCount = 10000 + DefaultSnapCount = 100000 StoreClusterPrefix = "/0" StoreKeysPrefix = "/1" @@ -77,7 +75,6 @@ const ( // (since it will timeout). monitorVersionInterval = rafthttp.ConnWriteTimeout - time.Second - databaseFilename = "db" // max number of in-flight snapshot messages etcdserver allows to have // This number is more than enough for most clusters with 5 machines. maxInFlightMsgSnap = 16 @@ -85,7 +82,8 @@ const ( releaseDelayAfterSnapshot = 30 * time.Second // maxPendingRevokes is the maximum number of outstanding expired lease revocations. - maxPendingRevokes = 16 + maxPendingRevokes = 16 + recommendedMaxRequestBytes = 10 * 1024 * 1024 ) var ( @@ -135,15 +133,15 @@ type Server interface { // AddMember attempts to add a member into the cluster. It will return // ErrIDRemoved if member ID is removed from the cluster, or return // ErrIDExists if member ID exists in the cluster. - AddMember(ctx context.Context, memb membership.Member) error + AddMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) // RemoveMember attempts to remove a member from the cluster. It will // return ErrIDRemoved if member ID is removed from the cluster, or return // ErrIDNotFound if member ID is not in the cluster. 
- RemoveMember(ctx context.Context, id uint64) error + RemoveMember(ctx context.Context, id uint64) ([]*membership.Member, error) // UpdateMember attempts to update an existing member in the cluster. It will // return ErrIDNotFound if the member ID does not exist. - UpdateMember(ctx context.Context, updateMemb membership.Member) error + UpdateMember(ctx context.Context, updateMemb membership.Member) ([]*membership.Member, error) // ClusterVersion is the cluster-wide minimum major.minor version. // Cluster version is set to the min version that an etcd member is @@ -201,7 +199,8 @@ type EtcdServer struct { cluster *membership.RaftCluster - store store.Store + store store.Store + snapshotter *snap.Snapshotter applyV2 ApplierV2 @@ -221,7 +220,7 @@ type EtcdServer struct { stats *stats.ServerStats lstats *stats.LeaderStats - SyncTicker <-chan time.Time + SyncTicker *time.Ticker // compactor is used to auto-compact the KV. compactor *compactor.Periodic @@ -238,6 +237,14 @@ type EtcdServer struct { // wg is used to wait for the goroutines that depend on the server state // to exit when stopping the server. wg sync.WaitGroup + + // ctx is used for etcd-initiated requests that may need to be canceled + // on etcd server shutdown. + ctx context.Context + cancel context.CancelFunc + + leadTimeMu sync.RWMutex + leadElectedTime time.Time } // NewServer creates a new EtcdServer from the supplied configuration. The @@ -253,6 +260,10 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) { cl *membership.RaftCluster ) + if cfg.MaxRequestBytes > recommendedMaxRequestBytes { + plog.Warningf("MaxRequestBytes %v exceeds maximum recommended size %v", cfg.MaxRequestBytes, recommendedMaxRequestBytes) + } + if terr := fileutil.TouchDirAll(cfg.DataDir); terr != nil { return nil, fmt.Errorf("cannot access data directory: %v", terr) } @@ -264,23 +275,9 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) { } ss := snap.New(cfg.SnapDir()) - bepath := filepath.Join(cfg.SnapDir(), databaseFilename) + bepath := cfg.backendPath() beExist := fileutil.Exist(bepath) - - var be backend.Backend - beOpened := make(chan struct{}) - go func() { - be = backend.NewDefaultBackend(bepath) - beOpened <- struct{}{} - }() - - select { - case <-beOpened: - case <-time.After(time.Second): - plog.Warningf("another etcd process is running with the same data dir and holding the file lock.") - plog.Warningf("waiting for it to exit before starting...") - <-beOpened - } + be := openBackend(cfg) defer func() { if err != nil { @@ -378,6 +375,9 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) { plog.Panicf("recovered store from snapshot error: %v", err) } plog.Infof("recovered store from snapshot at index %d", snapshot.Metadata.Index) + if be, err = recoverSnapshotBackend(cfg, be, *snapshot); err != nil { + plog.Panicf("recovering backend from snapshot error: %v", err) + } } cfg.Print() if !cfg.ForceNewCluster { @@ -400,39 +400,32 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) { return nil, fmt.Errorf("cannot access member directory: %v", terr) } - sstats := &stats.ServerStats{ - Name: cfg.Name, - ID: id.String(), - } - sstats.Initialize() + sstats := stats.NewServerStats(cfg.Name, id.String()) lstats := stats.NewLeaderStats(id.String()) heartbeat := time.Duration(cfg.TickMs) * time.Millisecond srv = &EtcdServer{ - readych: make(chan struct{}), - Cfg: cfg, - snapCount: cfg.SnapCount, - errorc: make(chan error, 1), - store: st, - r: raftNode{ - isIDRemoved: func(id uint64) bool {
return cl.IsIDRemoved(types.ID(id)) }, - Node: n, - ticker: time.Tick(heartbeat), - // set up contention detectors for raft heartbeat message. - // expect to send a heartbeat within 2 heartbeat intervals. - td: contention.NewTimeoutDetector(2 * heartbeat), - heartbeat: heartbeat, - raftStorage: s, - storage: NewStorage(w, ss), - msgSnapC: make(chan raftpb.Message, maxInFlightMsgSnap), - readStateC: make(chan raft.ReadState, 1), - }, + readych: make(chan struct{}), + Cfg: cfg, + snapCount: cfg.SnapCount, + errorc: make(chan error, 1), + store: st, + snapshotter: ss, + r: *newRaftNode( + raftNodeConfig{ + isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) }, + Node: n, + heartbeat: heartbeat, + raftStorage: s, + storage: NewStorage(w, ss), + }, + ), id: id, attributes: membership.Attributes{Name: cfg.Name, ClientURLs: cfg.ClientURLs.StringSlice()}, cluster: cl, stats: sstats, lstats: lstats, - SyncTicker: time.Tick(500 * time.Millisecond), + SyncTicker: time.NewTicker(500 * time.Millisecond), peerRt: prt, reqIDGen: idutil.NewGenerator(uint16(id), time.Now()), forceVersionC: make(chan struct{}), @@ -458,12 +451,26 @@ func NewServer(cfg *ServerConfig) (srv *EtcdServer, err error) { plog.Warningf("consistent index never saved (snapshot index=%d)", snapshot.Metadata.Index) } } - srv.consistIndex.setConsistentIndex(srv.kv.ConsistentIndex()) + newSrv := srv // since srv == nil in defer if srv is returned as nil + defer func() { + // closing backend without first closing kv can cause + // resumed compactions to fail with closed tx errors + if err != nil { + newSrv.kv.Close() + } + }() - srv.authStore = auth.NewAuthStore(srv.be, + srv.consistIndex.setConsistentIndex(srv.kv.ConsistentIndex()) + tp, err := auth.NewTokenProvider(cfg.AuthToken, func(index uint64) <-chan struct{} { return srv.applyWait.Wait(index) - }) + }, + ) + if err != nil { + plog.Errorf("failed to create token provider: %s", err) + return nil, err + } + srv.authStore = auth.NewAuthStore(srv.be, tp) if h := cfg.AutoCompactionRetention; h != 0 { srv.compactor = compactor.NewPeriodic(h, srv.kv, srv) srv.compactor.Run() @@ -531,6 +538,7 @@ func (s *EtcdServer) start() { s.done = make(chan struct{}) s.stop = make(chan struct{}) s.stopping = make(chan struct{}) + s.ctx, s.cancel = context.WithCancel(context.Background()) s.readwaitc = make(chan struct{}, 1) s.readNotifier = newNotifier() if s.ClusterVersion() != nil { @@ -603,16 +611,19 @@ type etcdProgress struct { // and helps decouple state machine logic from Raft algorithms. 
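The SyncTicker change above (time.Tick to time.NewTicker) matters for shutdown: time.Tick returns only a channel with no handle to the underlying ticker, so it can never be stopped and its resources live for the rest of the process, whereas a *time.Ticker can be stopped in run()'s shutdown path below. A small sketch of the pattern (not etcd code):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// time.NewTicker hands back the ticker itself, so the owner can stop it;
	// time.Tick(500 * time.Millisecond) would offer no equivalent of Stop.
	syncTicker := time.NewTicker(500 * time.Millisecond)
	defer syncTicker.Stop() // releases the ticker on shutdown

	stop := time.After(1200 * time.Millisecond)
	for {
		select {
		case t := <-syncTicker.C:
			fmt.Println("sync at", t.Format(time.StampMilli))
		case <-stop:
			return
		}
	}
}
```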
// TODO: add a state machine interface to apply the commit entries and do snapshot/recover type raftReadyHandler struct { - updateLeadership func() + updateLeadership func(newLeader bool) updateCommittedIndex func(uint64) } func (s *EtcdServer) run() { - snap, err := s.r.raftStorage.Snapshot() + sn, err := s.r.raftStorage.Snapshot() if err != nil { plog.Panicf("get snapshot from raft storage error: %v", err) } + // asynchronously accept apply packets, dispatch progress in-order + sched := schedule.NewFIFOScheduler() + var ( smu sync.RWMutex syncC <-chan time.Time @@ -629,7 +640,7 @@ func (s *EtcdServer) run() { return } rh := &raftReadyHandler{ - updateLeadership: func() { + updateLeadership: func(newLeader bool) { if !s.isLeader() { if s.lessor != nil { s.lessor.Demote() @@ -639,7 +650,13 @@ func (s *EtcdServer) run() { } setSyncC(nil) } else { - setSyncC(s.SyncTicker) + if newLeader { + t := time.Now() + s.leadTimeMu.Lock() + s.leadElectedTime = t + s.leadTimeMu.Unlock() + } + setSyncC(s.SyncTicker.C) if s.compactor != nil { s.compactor.Resume() } @@ -650,9 +667,6 @@ func (s *EtcdServer) run() { if s.stats != nil { s.stats.BecomeLeader() } - if s.r.td != nil { - s.r.td.Reset() - } }, updateCommittedIndex: func(ci uint64) { cci := s.getCommittedIndex() @@ -663,25 +677,26 @@ func (s *EtcdServer) run() { } s.r.start(rh) - // asynchronously accept apply packets, dispatch progress in-order - sched := schedule.NewFIFOScheduler() ep := etcdProgress{ - confState: snap.Metadata.ConfState, - snapi: snap.Metadata.Index, - appliedt: snap.Metadata.Term, - appliedi: snap.Metadata.Index, + confState: sn.Metadata.ConfState, + snapi: sn.Metadata.Index, + appliedt: sn.Metadata.Term, + appliedi: sn.Metadata.Index, } defer func() { s.wgMu.Lock() // block concurrent waitgroup adds in goAttach while stopping close(s.stopping) s.wgMu.Unlock() + s.cancel() sched.Stop() // wait for goroutines before closing raft so wal stays open s.wg.Wait() + s.SyncTicker.Stop() + // must stop raft after scheduler; etcdserver can leak rafthttp pipelines // by adding a peer after raft stops the transport s.r.stop() @@ -728,7 +743,8 @@ func (s *EtcdServer) run() { } lid := lease.ID s.goAttach(func() { - s.LeaseRevoke(context.TODO(), &pb.LeaseRevokeRequest{ID: int64(lid)}) + s.LeaseRevoke(s.ctx, &pb.LeaseRevokeRequest{ID: int64(lid)}) + leaseExpired.Inc() <-c }) } @@ -762,7 +778,7 @@ func (s *EtcdServer) applyAll(ep *etcdProgress, apply *apply) { // wait for the raft routine to finish the disk writes before triggering a // snapshot, or the applied index might be greater than the last index in raft // storage, since the raft routine might be slower than the apply routine. - <-apply.raftDone + <-apply.notifyc s.triggerSnapshot(ep) select { @@ -787,23 +803,19 @@ func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) { apply.snapshot.Metadata.Index, ep.appliedi) } - snapfn, err := s.r.storage.DBFilePath(apply.snapshot.Metadata.Index) - if err != nil { - plog.Panicf("get database snapshot file path error: %v", err) - } + // wait for raftNode to persist snapshot onto the disk + <-apply.notifyc - fn := filepath.Join(s.Cfg.SnapDir(), databaseFilename) - if err := os.Rename(snapfn, fn); err != nil { - plog.Panicf("rename snapshot file error: %v", err) + newbe, err := openSnapshotBackend(s.Cfg, s.snapshotter, apply.snapshot) + if err != nil { + plog.Panic(err) } - newbe := backend.NewDefaultBackend(fn) - // always recover lessor before kv. When we recover the mvcc.KV it will reattach keys to its leases.
// If we recover mvcc.KV first, it will attach the keys to the wrong lessor before it recovers. if s.lessor != nil { plog.Info("recovering lessor...") - s.lessor.Recover(newbe, s.kv) + s.lessor.Recover(newbe, func() lease.TxnDelete { return s.kv.Write() }) plog.Info("finished recovering lessor") } @@ -955,7 +967,7 @@ func (s *EtcdServer) TransferLeadership() error { } tm := s.Cfg.ReqTimeout() - ctx, cancel := context.WithTimeout(context.TODO(), tm) + ctx, cancel := context.WithTimeout(s.ctx, tm) err := s.transferLeadership(ctx, s.Lead(), uint64(transferee)) cancel() return err @@ -1015,7 +1027,7 @@ func (s *EtcdServer) StoreStats() []byte { return s.store.JsonStats() } func (s *EtcdServer) checkMembershipOperationPermission(ctx context.Context) error { if s.authStore == nil { - // In the context of ordinal etcd process, s.authStore will never be nil. + // In the context of ordinary etcd process, s.authStore will never be nil. // This branch is for handling cases in server_test.go return nil } @@ -1026,7 +1038,7 @@ func (s *EtcdServer) checkMembershipOperationPermission(ctx context.Context) err // in the state machine layer // However, both membership change and role management require the root privilege. // So careful operation by admins can prevent the problem. - authInfo, err := s.AuthStore().AuthInfoFromCtx(ctx) + authInfo, err := s.AuthInfoFromCtx(ctx) if err != nil { return err } @@ -1034,27 +1046,27 @@ func (s *EtcdServer) checkMembershipOperationPermission(ctx context.Context) err return s.AuthStore().IsAdminPermitted(authInfo) } -func (s *EtcdServer) AddMember(ctx context.Context, memb membership.Member) error { +func (s *EtcdServer) AddMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) { if err := s.checkMembershipOperationPermission(ctx); err != nil { - return err + return nil, err } if s.Cfg.StrictReconfigCheck { // by default StrictReconfigCheck is enabled; reject new members if unhealthy if !s.cluster.IsReadyToAddNewMember() { plog.Warningf("not enough started members, rejecting member add %+v", memb) - return ErrNotEnoughStartedMembers + return nil, ErrNotEnoughStartedMembers } if !isConnectedFullySince(s.r.transport, time.Now().Add(-HealthInterval), s.ID(), s.cluster.Members()) { plog.Warningf("not healthy for reconfigure, rejecting member add %+v", memb) - return ErrUnhealthy + return nil, ErrUnhealthy } } // TODO: move Member to protobuf type b, err := json.Marshal(memb) if err != nil { - return err + return nil, err } cc := raftpb.ConfChange{ Type: raftpb.ConfChangeAddNode, @@ -1064,14 +1076,14 @@ func (s *EtcdServer) AddMember(ctx context.Context, memb membership.Member) erro return s.configure(ctx, cc) } -func (s *EtcdServer) RemoveMember(ctx context.Context, id uint64) error { +func (s *EtcdServer) RemoveMember(ctx context.Context, id uint64) ([]*membership.Member, error) { if err := s.checkMembershipOperationPermission(ctx); err != nil { - return err + return nil, err } // by default StrictReconfigCheck is enabled; reject removal if it leads to quorum loss if err := s.mayRemoveMember(types.ID(id)); err != nil { - return err + return nil, err } cc := raftpb.ConfChange{ @@ -1107,14 +1119,14 @@ func (s *EtcdServer) mayRemoveMember(id types.ID) error { return nil } -func (s *EtcdServer) UpdateMember(ctx context.Context, memb membership.Member) error { +func (s *EtcdServer) UpdateMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) { b, merr := json.Marshal(memb) if merr != nil { - return merr + return nil, merr
} if err := s.checkMembershipOperationPermission(ctx); err != nil { - return err + return nil, err } cc := raftpb.ConfChange{ Type: raftpb.ConfChangeUpdateNode, @@ -1137,31 +1149,34 @@ func (s *EtcdServer) Lead() uint64 { return atomic.LoadUint64(&s.r.lead) } func (s *EtcdServer) Leader() types.ID { return types.ID(s.Lead()) } +type confChangeResponse struct { + membs []*membership.Member + err error +} + // configure sends a configuration change through consensus and // then waits for it to be applied to the server. It // will block until the change is performed or there is an error. -func (s *EtcdServer) configure(ctx context.Context, cc raftpb.ConfChange) error { +func (s *EtcdServer) configure(ctx context.Context, cc raftpb.ConfChange) ([]*membership.Member, error) { cc.ID = s.reqIDGen.Next() ch := s.w.Register(cc.ID) start := time.Now() if err := s.r.ProposeConfChange(ctx, cc); err != nil { s.w.Trigger(cc.ID, nil) - return err + return nil, err } select { case x := <-ch: - if err, ok := x.(error); ok { - return err - } - if x != nil { - plog.Panicf("return type should always be error") + if x == nil { + plog.Panicf("configure trigger value should never be nil") } - return nil + resp := x.(*confChangeResponse) + return resp.membs, resp.err case <-ctx.Done(): s.w.Trigger(cc.ID, nil) // GC wait - return s.parseProposeCtxErr(ctx.Err(), start) + return nil, s.parseProposeCtxErr(ctx.Err(), start) case <-s.stopping: - return ErrStopped + return nil, ErrStopped } } @@ -1169,7 +1184,6 @@ func (s *EtcdServer) configure(ctx context.Context, cc raftpb.ConfChange) error // This makes no guarantee that the request will be proposed or performed. // The request will be canceled after the given timeout. func (s *EtcdServer) sync(timeout time.Duration) { - ctx, cancel := context.WithTimeout(context.Background(), timeout) req := pb.Request{ Method: "SYNC", ID: s.reqIDGen.Next(), @@ -1178,6 +1192,7 @@ func (s *EtcdServer) sync(timeout time.Duration) { data := pbutil.MustMarshal(&req) // There is no promise that the node has a leader when doing a SYNC request, // so it uses a goroutine to propose.
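The sync and publish paths below, like TransferLeadership and the lease-revoke path earlier, now derive their per-request timeouts from the server-scoped s.ctx rather than context.TODO()/context.Background(), so the single s.cancel() in run()'s shutdown defer aborts every in-flight proposal at once. A self-contained sketch of the idea (names here are illustrative, not etcd's API):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

type server struct {
	ctx    context.Context
	cancel context.CancelFunc
}

func newServer() *server {
	ctx, cancel := context.WithCancel(context.Background())
	return &server{ctx: ctx, cancel: cancel}
}

// do derives its timeout from the server context, not context.Background(),
// so server shutdown cancels the request too.
func (s *server) do(timeout time.Duration) error {
	ctx, cancel := context.WithTimeout(s.ctx, timeout)
	defer cancel()
	select {
	case <-time.After(2 * timeout): // a request that would overrun its timeout
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	s := newServer()
	go func() {
		time.Sleep(20 * time.Millisecond)
		s.cancel() // server shutdown
	}()
	fmt.Println("request ended with:", s.do(time.Second)) // context.Canceled
}
```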
+ ctx, cancel := context.WithTimeout(s.ctx, timeout) s.goAttach(func() { s.r.Propose(ctx, data) cancel() @@ -1202,7 +1217,7 @@ func (s *EtcdServer) publish(timeout time.Duration) { } for { - ctx, cancel := context.WithTimeout(context.Background(), timeout) + ctx, cancel := context.WithTimeout(s.ctx, timeout) _, err := s.Do(ctx, req) cancel() switch err { @@ -1262,7 +1277,7 @@ func (s *EtcdServer) apply(es []raftpb.Entry, confState *raftpb.ConfState) (appl removedSelf, err := s.applyConfChange(cc, confState) s.setAppliedIndex(e.Index) shouldStop = shouldStop || removedSelf - s.w.Trigger(cc.ID, err) + s.w.Trigger(cc.ID, &confChangeResponse{s.cluster.Members(), err}) default: plog.Panicf("entry type should be either EntryNormal or EntryConfChange") } @@ -1347,8 +1362,7 @@ func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) { Action: pb.AlarmRequest_ACTIVATE, Alarm: pb.AlarmType_NOSPACE, } - r := pb.InternalRaftRequest{Alarm: a} - s.processInternalRaftRequest(context.TODO(), r) + s.raftRequest(s.ctx, pb.InternalRaftRequest{Alarm: a}) s.w.Trigger(id, ar) }) } @@ -1544,7 +1558,7 @@ func (s *EtcdServer) updateClusterVersion(ver string) { Path: membership.StoreClusterVersionKey(), Val: ver, } - ctx, cancel := context.WithTimeout(context.Background(), s.Cfg.ReqTimeout()) + ctx, cancel := context.WithTimeout(s.ctx, s.Cfg.ReqTimeout()) _, err := s.Do(ctx, req) cancel() switch err { @@ -1563,7 +1577,9 @@ func (s *EtcdServer) parseProposeCtxErr(err error, start time.Time) error { case context.Canceled: return ErrCanceled case context.DeadlineExceeded: - curLeadElected := s.r.leadElectedTime() + s.leadTimeMu.RLock() + curLeadElected := s.leadElectedTime + s.leadTimeMu.RUnlock() prevLeadLost := curLeadElected.Add(-2 * time.Duration(s.Cfg.ElectionTicks) * time.Duration(s.Cfg.TickMs) * time.Millisecond) if start.After(prevLeadLost) && start.Before(curLeadElected) { return ErrTimeoutDueToLeaderFail diff --git a/vendor/github.com/coreos/etcd/etcdserver/snapshot_merge.go b/vendor/github.com/coreos/etcd/etcdserver/snapshot_merge.go index 9cfc852168bd..928aa95b6b16 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/snapshot_merge.go +++ b/vendor/github.com/coreos/etcd/etcdserver/snapshot_merge.go @@ -60,9 +60,14 @@ func newSnapshotReaderCloser(snapshot backend.Snapshot) io.ReadCloser { n, err := snapshot.WriteTo(pw) if err == nil { plog.Infof("wrote database snapshot out [total bytes: %d]", n) + } else { + plog.Warningf("failed to write database snapshot out [written bytes: %d]: %v", n, err) } pw.CloseWithError(err) - snapshot.Close() + err = snapshot.Close() + if err != nil { + plog.Panicf("failed to close database snapshot: %v", err) + } }() return pr } diff --git a/vendor/github.com/coreos/etcd/etcdserver/stats/leader.go b/vendor/github.com/coreos/etcd/etcdserver/stats/leader.go index 1bed85474e32..8f6a54ff751a 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/stats/leader.go +++ b/vendor/github.com/coreos/etcd/etcdserver/stats/leader.go @@ -24,25 +24,30 @@ import ( // LeaderStats is used by the leader in an etcd cluster, and encapsulates // statistics about communication with its followers type LeaderStats struct { + leaderStats + sync.Mutex +} + +type leaderStats struct { // Leader is the ID of the leader in the etcd cluster. 
// TODO(jonboulle): clarify that these are IDs, not names Leader string `json:"leader"` Followers map[string]*FollowerStats `json:"followers"` - - sync.Mutex } // NewLeaderStats generates a new LeaderStats with the given id as leader func NewLeaderStats(id string) *LeaderStats { return &LeaderStats{ - Leader: id, - Followers: make(map[string]*FollowerStats), + leaderStats: leaderStats{ + Leader: id, + Followers: make(map[string]*FollowerStats), + }, } } func (ls *LeaderStats) JSON() []byte { ls.Lock() - stats := *ls + stats := ls.leaderStats ls.Unlock() b, err := json.Marshal(stats) // TODO(jonboulle): appropriate error handling? diff --git a/vendor/github.com/coreos/etcd/etcdserver/stats/server.go b/vendor/github.com/coreos/etcd/etcdserver/stats/server.go index cd450e2d1999..0278e885cf99 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/stats/server.go +++ b/vendor/github.com/coreos/etcd/etcdserver/stats/server.go @@ -26,6 +26,26 @@ import ( // ServerStats encapsulates various statistics about an EtcdServer and its // communication with other members of the cluster type ServerStats struct { + serverStats + sync.Mutex +} + +func NewServerStats(name, id string) *ServerStats { + ss := &ServerStats{ + serverStats: serverStats{ + Name: name, + ID: id, + }, + } + now := time.Now() + ss.StartTime = now + ss.LeaderInfo.StartTime = now + ss.sendRateQueue = &statsQueue{back: -1} + ss.recvRateQueue = &statsQueue{back: -1} + return ss +} + +type serverStats struct { Name string `json:"name"` // ID is the raft ID of the node. // TODO(jonboulle): use ID instead of name? @@ -49,17 +69,15 @@ type ServerStats struct { sendRateQueue *statsQueue recvRateQueue *statsQueue - - sync.Mutex } func (ss *ServerStats) JSON() []byte { ss.Lock() - stats := *ss + stats := ss.serverStats ss.Unlock() stats.LeaderInfo.Uptime = time.Since(stats.LeaderInfo.StartTime).String() - stats.SendingPkgRate, stats.SendingBandwidthRate = stats.SendRates() - stats.RecvingPkgRate, stats.RecvingBandwidthRate = stats.RecvRates() + stats.SendingPkgRate, stats.SendingBandwidthRate = stats.sendRateQueue.Rate() + stats.RecvingPkgRate, stats.RecvingBandwidthRate = stats.recvRateQueue.Rate() b, err := json.Marshal(stats) // TODO(jonboulle): appropriate error handling? 
if err != nil { @@ -68,32 +86,6 @@ func (ss *ServerStats) JSON() []byte { return b } -// Initialize clears the statistics of ServerStats and resets its start time -func (ss *ServerStats) Initialize() { - if ss == nil { - return - } - now := time.Now() - ss.StartTime = now - ss.LeaderInfo.StartTime = now - ss.sendRateQueue = &statsQueue{ - back: -1, - } - ss.recvRateQueue = &statsQueue{ - back: -1, - } -} - -// RecvRates calculates and returns the rate of received append requests -func (ss *ServerStats) RecvRates() (float64, float64) { - return ss.recvRateQueue.Rate() -} - -// SendRates calculates and returns the rate of sent append requests -func (ss *ServerStats) SendRates() (float64, float64) { - return ss.sendRateQueue.Rate() -} - // RecvAppendReq updates the ServerStats in response to an AppendRequest // from the given leader being received func (ss *ServerStats) RecvAppendReq(leader string, reqSize int) { diff --git a/vendor/github.com/coreos/etcd/etcdserver/storage.go b/vendor/github.com/coreos/etcd/etcdserver/storage.go index 693618fbd51c..aa8f87569dbe 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/storage.go +++ b/vendor/github.com/coreos/etcd/etcdserver/storage.go @@ -32,9 +32,6 @@ type Storage interface { Save(st raftpb.HardState, ents []raftpb.Entry) error // SaveSnap function saves snapshot to the underlying stable storage. SaveSnap(snap raftpb.Snapshot) error - // DBFilePath returns the file path of database snapshot saved with given - // id. - DBFilePath(id uint64) (string, error) // Close closes the Storage and performs finalization. Close() error } diff --git a/vendor/github.com/coreos/etcd/etcdserver/util.go b/vendor/github.com/coreos/etcd/etcdserver/util.go index 66084ae12446..e3896ffc2d3d 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/util.go +++ b/vendor/github.com/coreos/etcd/etcdserver/util.go @@ -87,7 +87,7 @@ type notifier struct { func newNotifier() *notifier { return &notifier{ - c: make(chan struct{}, 0), + c: make(chan struct{}), } } diff --git a/vendor/github.com/coreos/etcd/etcdserver/v3_server.go b/vendor/github.com/coreos/etcd/etcdserver/v3_server.go index 60653cb6dff4..ae449bbf22ff 100644 --- a/vendor/github.com/coreos/etcd/etcdserver/v3_server.go +++ b/vendor/github.com/coreos/etcd/etcdserver/v3_server.go @@ -19,6 +19,8 @@ import ( "encoding/binary" "time" + "github.com/gogo/protobuf/proto" + "github.com/coreos/etcd/auth" pb "github.com/coreos/etcd/etcdserver/etcdserverpb" "github.com/coreos/etcd/etcdserver/membership" @@ -27,17 +29,10 @@ import ( "github.com/coreos/etcd/mvcc" "github.com/coreos/etcd/raft" - "github.com/coreos/go-semver/semver" "golang.org/x/net/context" ) const ( - // the max request size that raft accepts. - // TODO: make this a flag? But we probably do not want to - // accept large request which might block raft stream. User - // specify a large value might end up with shooting in the foot. - maxRequestBytes = 1.5 * 1024 * 1024 - // In the healthy case, there might be a small gap (10s of entries) between // the applied index and committed index. // However, if the committed entries are very heavy to apply, the gap might grow.
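The hard-coded maxRequestBytes removed above is replaced by the configurable s.Cfg.MaxRequestBytes, checked later in processInternalRaftRequestOnce, while NewServer warns when the configured value exceeds recommendedMaxRequestBytes (10 MiB). A minimal sketch of that configurable limit (helper names here are illustrative, not etcd's):

```go
package main

import (
	"errors"
	"fmt"
	"log"
)

// recommendedMaxRequestBytes mirrors the 10 MiB ceiling added to server.go above.
const recommendedMaxRequestBytes = 10 * 1024 * 1024

var errRequestTooLarge = errors.New("request is too large")

type config struct{ MaxRequestBytes uint }

// newConfig allows any limit but warns above the recommended maximum,
// matching the NewServer behavior in the diff.
func newConfig(maxBytes uint) config {
	if maxBytes > recommendedMaxRequestBytes {
		log.Printf("MaxRequestBytes %v exceeds maximum recommended size %v", maxBytes, recommendedMaxRequestBytes)
	}
	return config{MaxRequestBytes: maxBytes}
}

// check enforces the configured cap, as processInternalRaftRequestOnce now does.
func (c config) check(data []byte) error {
	if len(data) > int(c.MaxRequestBytes) {
		return errRequestTooLarge
	}
	return nil
}

func main() {
	cfg := newConfig(1536 * 1024)               // 1.5 MiB, the old hard-coded default
	fmt.Println(cfg.check(make([]byte, 2<<20))) // 2 MiB payload: request is too large
}
```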
@@ -45,10 +40,6 @@ const ( maxGapBetweenApplyAndCommitIndex = 5000 ) -var ( - newRangeClusterVersion = *semver.Must(semver.NewVersion("3.1.0")) -) - type RaftKV interface { Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) @@ -91,11 +82,6 @@ type Authenticator interface { } func (s *EtcdServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) { - // TODO: remove this checking when we release etcd 3.2 - if s.ClusterVersion() == nil || s.ClusterVersion().LessThan(newRangeClusterVersion) { - return s.legacyRange(ctx, r) - } - if !r.Serializable { err := s.linearizableReadNotify(ctx) if err != nil { @@ -107,65 +93,30 @@ func (s *EtcdServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeRe chk := func(ai *auth.AuthInfo) error { return s.authStore.IsRangePermitted(ai, r.Key, r.RangeEnd) } - get := func() { resp, err = s.applyV3Base.Range(noTxn, r) } + get := func() { resp, err = s.applyV3Base.Range(nil, r) } if serr := s.doSerialize(ctx, chk, get); serr != nil { return nil, serr } return resp, err } -// TODO: remove this func when we release etcd 3.2 -func (s *EtcdServer) legacyRange(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) { - if r.Serializable { - var resp *pb.RangeResponse - var err error - chk := func(ai *auth.AuthInfo) error { - return s.authStore.IsRangePermitted(ai, r.Key, r.RangeEnd) - } - get := func() { resp, err = s.applyV3Base.Range(noTxn, r) } - if serr := s.doSerialize(ctx, chk, get); serr != nil { - return nil, serr - } - return resp, err - } - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{Range: r}) - if err != nil { - return nil, err - } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.RangeResponse), nil -} - func (s *EtcdServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) { - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{Put: r}) + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{Put: r}) if err != nil { return nil, err } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.PutResponse), nil + return resp.(*pb.PutResponse), nil } func (s *EtcdServer) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) { - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{DeleteRange: r}) + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{DeleteRange: r}) if err != nil { return nil, err } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.DeleteRangeResponse), nil + return resp.(*pb.DeleteRangeResponse), nil } func (s *EtcdServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) { - // TODO: remove this checking when we release etcd 3.2 - if s.ClusterVersion() == nil || s.ClusterVersion().LessThan(newRangeClusterVersion) { - return s.legacyTxn(ctx, r) - } - if isTxnReadonly(r) { if !isTxnSerializable(r) { err := s.linearizableReadNotify(ctx) @@ -184,38 +135,11 @@ func (s *EtcdServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse } return resp, err } - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{Txn: r}) - if err != nil { - return nil, err - } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.TxnResponse), nil -} - -// TODO: remove this func when we release etcd 3.2 -func (s *EtcdServer) legacyTxn(ctx context.Context, r 
*pb.TxnRequest) (*pb.TxnResponse, error) { - if isTxnSerializable(r) { - var resp *pb.TxnResponse - var err error - chk := func(ai *auth.AuthInfo) error { - return checkTxnAuth(s.authStore, ai, r) - } - get := func() { resp, err = s.applyV3Base.Txn(r) } - if serr := s.doSerialize(ctx, chk, get); serr != nil { - return nil, serr - } - return resp, err - } - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{Txn: r}) + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{Txn: r}) if err != nil { return nil, err } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.TxnResponse), nil + return resp.(*pb.TxnResponse), nil } func isTxnSerializable(r *pb.TxnRequest) bool { @@ -280,25 +204,19 @@ func (s *EtcdServer) LeaseGrant(ctx context.Context, r *pb.LeaseGrantRequest) (* // only use positive int64 id's r.ID = int64(s.reqIDGen.Next() & ((1 << 63) - 1)) } - result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{LeaseGrant: r}) + resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{LeaseGrant: r}) if err != nil { return nil, err } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.LeaseGrantResponse), nil + return resp.(*pb.LeaseGrantResponse), nil } func (s *EtcdServer) LeaseRevoke(ctx context.Context, r *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) { - result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{LeaseRevoke: r}) + resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{LeaseRevoke: r}) if err != nil { return nil, err } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.LeaseRevokeResponse), nil + return resp.(*pb.LeaseRevokeResponse), nil } func (s *EtcdServer) LeaseRenew(ctx context.Context, id lease.LeaseID) (int64, error) { @@ -394,54 +312,45 @@ func (s *EtcdServer) waitLeader(ctx context.Context) (*membership.Member, error) } func (s *EtcdServer) Alarm(ctx context.Context, r *pb.AlarmRequest) (*pb.AlarmResponse, error) { - result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{Alarm: r}) + resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{Alarm: r}) if err != nil { return nil, err } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.AlarmResponse), nil + return resp.(*pb.AlarmResponse), nil } func (s *EtcdServer) AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error) { - result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{AuthEnable: r}) + resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{AuthEnable: r}) if err != nil { return nil, err } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.AuthEnableResponse), nil + return resp.(*pb.AuthEnableResponse), nil } func (s *EtcdServer) AuthDisable(ctx context.Context, r *pb.AuthDisableRequest) (*pb.AuthDisableResponse, error) { - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthDisable: r}) + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthDisable: r}) if err != nil { return nil, err } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.AuthDisableResponse), nil + return resp.(*pb.AuthDisableResponse), nil } func (s *EtcdServer) Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error) { - var result *applyResult - - err := s.linearizableReadNotify(ctx) - if err != nil { + if err := s.linearizableReadNotify(ctx); err 
!= nil { return nil, err } + var resp proto.Message for { checkedRevision, err := s.AuthStore().CheckPassword(r.Name, r.Password) if err != nil { - plog.Errorf("invalid authentication request to user %s was issued", r.Name) + if err != auth.ErrAuthNotEnabled { + plog.Errorf("invalid authentication request to user %s was issued", r.Name) + } return nil, err } - st, err := s.AuthStore().GenSimpleToken() + st, err := s.AuthStore().GenTokenPrefix() if err != nil { return nil, err } @@ -452,172 +361,147 @@ func (s *EtcdServer) Authenticate(ctx context.Context, r *pb.AuthenticateRequest SimpleToken: st, } - result, err = s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{Authenticate: internalReq}) + resp, err = s.raftRequestOnce(ctx, pb.InternalRaftRequest{Authenticate: internalReq}) if err != nil { return nil, err } - if result.err != nil { - return nil, result.err - } - - if checkedRevision != s.AuthStore().Revision() { - plog.Infof("revision when password checked is obsolete, retrying") - continue + if checkedRevision == s.AuthStore().Revision() { + break } - - break + plog.Infof("revision when password checked is obsolete, retrying") } - return result.resp.(*pb.AuthenticateResponse), nil + return resp.(*pb.AuthenticateResponse), nil } func (s *EtcdServer) UserAdd(ctx context.Context, r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) { - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthUserAdd: r}) + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserAdd: r}) if err != nil { return nil, err } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.AuthUserAddResponse), nil + return resp.(*pb.AuthUserAddResponse), nil } func (s *EtcdServer) UserDelete(ctx context.Context, r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) { - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthUserDelete: r}) + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserDelete: r}) if err != nil { return nil, err } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.AuthUserDeleteResponse), nil + return resp.(*pb.AuthUserDeleteResponse), nil } func (s *EtcdServer) UserChangePassword(ctx context.Context, r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) { - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthUserChangePassword: r}) + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserChangePassword: r}) if err != nil { return nil, err } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.AuthUserChangePasswordResponse), nil + return resp.(*pb.AuthUserChangePasswordResponse), nil } func (s *EtcdServer) UserGrantRole(ctx context.Context, r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) { - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthUserGrantRole: r}) + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserGrantRole: r}) if err != nil { return nil, err } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.AuthUserGrantRoleResponse), nil + return resp.(*pb.AuthUserGrantRoleResponse), nil } func (s *EtcdServer) UserGet(ctx context.Context, r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) { - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthUserGet: r}) + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserGet: r}) if err != nil { return 
nil, err } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.AuthUserGetResponse), nil + return resp.(*pb.AuthUserGetResponse), nil } func (s *EtcdServer) UserList(ctx context.Context, r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) { - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthUserList: r}) + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserList: r}) if err != nil { return nil, err } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.AuthUserListResponse), nil + return resp.(*pb.AuthUserListResponse), nil } func (s *EtcdServer) UserRevokeRole(ctx context.Context, r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) { - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthUserRevokeRole: r}) + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserRevokeRole: r}) if err != nil { return nil, err } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.AuthUserRevokeRoleResponse), nil + return resp.(*pb.AuthUserRevokeRoleResponse), nil } func (s *EtcdServer) RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) { - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthRoleAdd: r}) + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleAdd: r}) if err != nil { return nil, err } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.AuthRoleAddResponse), nil + return resp.(*pb.AuthRoleAddResponse), nil } func (s *EtcdServer) RoleGrantPermission(ctx context.Context, r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) { - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthRoleGrantPermission: r}) + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleGrantPermission: r}) if err != nil { return nil, err } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.AuthRoleGrantPermissionResponse), nil + return resp.(*pb.AuthRoleGrantPermissionResponse), nil } func (s *EtcdServer) RoleGet(ctx context.Context, r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) { - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthRoleGet: r}) + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleGet: r}) if err != nil { return nil, err } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.AuthRoleGetResponse), nil + return resp.(*pb.AuthRoleGetResponse), nil } func (s *EtcdServer) RoleList(ctx context.Context, r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) { - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthRoleList: r}) + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleList: r}) if err != nil { return nil, err } - if result.err != nil { - return nil, result.err - } - return result.resp.(*pb.AuthRoleListResponse), nil + return resp.(*pb.AuthRoleListResponse), nil } func (s *EtcdServer) RoleRevokePermission(ctx context.Context, r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) { - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthRoleRevokePermission: r}) + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleRevokePermission: r}) if err != nil { return nil, err } - if result.err != nil { - return nil, result.err - } - return 
result.resp.(*pb.AuthRoleRevokePermissionResponse), nil + return resp.(*pb.AuthRoleRevokePermissionResponse), nil } func (s *EtcdServer) RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) { - result, err := s.processInternalRaftRequest(ctx, pb.InternalRaftRequest{AuthRoleDelete: r}) + resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleDelete: r}) + if err != nil { + return nil, err + } + return resp.(*pb.AuthRoleDeleteResponse), nil +} + +func (s *EtcdServer) raftRequestOnce(ctx context.Context, r pb.InternalRaftRequest) (proto.Message, error) { + result, err := s.processInternalRaftRequestOnce(ctx, r) if err != nil { return nil, err } if result.err != nil { return nil, result.err } - return result.resp.(*pb.AuthRoleDeleteResponse), nil + return result.resp, nil +} + +func (s *EtcdServer) raftRequest(ctx context.Context, r pb.InternalRaftRequest) (proto.Message, error) { + for { + resp, err := s.raftRequestOnce(ctx, r) + if err != auth.ErrAuthOldRevision { + return resp, err + } + } } // doSerialize handles the auth logic, with permissions checked by "chk", for a serialized request "get". Returns a non-nil error on authentication failure. func (s *EtcdServer) doSerialize(ctx context.Context, chk func(*auth.AuthInfo) error, get func()) error { for { - ai, err := s.AuthStore().AuthInfoFromCtx(ctx) + ai, err := s.AuthInfoFromCtx(ctx) if err != nil { return err } @@ -652,7 +536,7 @@ func (s *EtcdServer) processInternalRaftRequestOnce(ctx context.Context, r pb.In ID: s.reqIDGen.Next(), } - authInfo, err := s.AuthStore().AuthInfoFromCtx(ctx) + authInfo, err := s.AuthInfoFromCtx(ctx) if err != nil { return nil, err } @@ -666,7 +550,7 @@ func (s *EtcdServer) processInternalRaftRequestOnce(ctx context.Context, r pb.In return nil, err } - if len(data) > maxRequestBytes { + if len(data) > int(s.Cfg.MaxRequestBytes) { return nil, ErrRequestTooLarge } @@ -696,19 +580,6 @@ func (s *EtcdServer) processInternalRaftRequestOnce(ctx context.Context, r pb.In } } -func (s *EtcdServer) processInternalRaftRequest(ctx context.Context, r pb.InternalRaftRequest) (*applyResult, error) { - var result *applyResult - var err error - for { - result, err = s.processInternalRaftRequestOnce(ctx, r) - if err != auth.ErrAuthOldRevision { - break - } - } - - return result, err -} - // Watchable returns a watchable interface attached to the etcdserver. 
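The new raftRequest helper above centralizes the retry loop that previously lived in processInternalRaftRequest: a proposal is retried only while it fails with auth.ErrAuthOldRevision (the auth store's revision moved on mid-request), and it returns immediately on success or any other error. A standalone sketch of that pattern (not etcd code):

```go
package main

import (
	"errors"
	"fmt"
)

// stands in for auth.ErrAuthOldRevision, the one retryable failure
var errAuthOldRevision = errors.New("auth: revision in header is old")

// raftRequest retries `once` while it reports a stale auth revision and
// returns on success or any other error, mirroring the helper above.
func raftRequest(once func() (string, error)) (string, error) {
	for {
		resp, err := once()
		if err != errAuthOldRevision {
			return resp, err
		}
	}
}

func main() {
	attempts := 0
	resp, err := raftRequest(func() (string, error) {
		attempts++
		if attempts < 3 {
			return "", errAuthOldRevision // stale revision: retry
		}
		return "ok", nil
	})
	fmt.Println(resp, err, "after", attempts, "attempts")
}
```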
func (s *EtcdServer) Watchable() mvcc.WatchableKV { return s.KV() } @@ -802,3 +673,14 @@ func (s *EtcdServer) linearizableReadNotify(ctx context.Context) error { return ErrStopped } } + +func (s *EtcdServer) AuthInfoFromCtx(ctx context.Context) (*auth.AuthInfo, error) { + if s.Cfg.ClientCertAuthEnabled { + authInfo := s.AuthStore().AuthInfoFromTLS(ctx) + if authInfo != nil { + return authInfo, nil + } + } + + return s.AuthStore().AuthInfoFromCtx(ctx) +} diff --git a/vendor/github.com/coreos/etcd/lease/leasehttp/http.go b/vendor/github.com/coreos/etcd/lease/leasehttp/http.go index 256051efc8dd..c3175cbbb0f0 100644 --- a/vendor/github.com/coreos/etcd/lease/leasehttp/http.go +++ b/vendor/github.com/coreos/etcd/lease/leasehttp/http.go @@ -16,6 +16,7 @@ package leasehttp import ( "bytes" + "context" "errors" "fmt" "io/ioutil" @@ -26,7 +27,6 @@ import ( "github.com/coreos/etcd/lease" "github.com/coreos/etcd/lease/leasepb" "github.com/coreos/etcd/pkg/httputil" - "golang.org/x/net/context" ) var ( @@ -202,45 +202,27 @@ func TimeToLiveHTTP(ctx context.Context, id lease.LeaseID, keys bool, url string } req.Header.Set("Content-Type", "application/protobuf") - cancel := httputil.RequestCanceler(req) + req = req.WithContext(ctx) cc := &http.Client{Transport: rt} var b []byte // buffer errc channel so that errc doesn't block inside the goroutine - errc := make(chan error, 2) - go func() { - resp, err := cc.Do(req) - if err != nil { - errc <- err - return - } - b, err = readResponse(resp) - if err != nil { - errc <- err - return - } - if resp.StatusCode == http.StatusRequestTimeout { - errc <- ErrLeaseHTTPTimeout - return - } - if resp.StatusCode == http.StatusNotFound { - errc <- lease.ErrLeaseNotFound - return - } - if resp.StatusCode != http.StatusOK { - errc <- fmt.Errorf("lease: unknown error(%s)", string(b)) - return - } - errc <- nil - }() - select { - case derr := <-errc: - if derr != nil { - return nil, derr - } - case <-ctx.Done(): - cancel() - return nil, ctx.Err() + resp, err := cc.Do(req) + if err != nil { + return nil, err + } + b, err = readResponse(resp) + if err != nil { + return nil, err + } + if resp.StatusCode == http.StatusRequestTimeout { + return nil, ErrLeaseHTTPTimeout + } + if resp.StatusCode == http.StatusNotFound { + return nil, lease.ErrLeaseNotFound + } + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("lease: unknown error(%s)", string(b)) } lresp := &leasepb.LeaseInternalResponse{} diff --git a/vendor/github.com/coreos/etcd/lease/leasepb/lease.pb.go b/vendor/github.com/coreos/etcd/lease/leasepb/lease.pb.go index fb3a9bab0c32..ec8db732be57 100644 --- a/vendor/github.com/coreos/etcd/lease/leasepb/lease.pb.go +++ b/vendor/github.com/coreos/etcd/lease/leasepb/lease.pb.go @@ -590,7 +590,7 @@ func init() { proto.RegisterFile("lease.proto", fileDescriptorLease) } var fileDescriptorLease = []byte{ // 233 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0xce, 0x49, 0x4d, 0x2c, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0xce, 0x49, 0x4d, 0x2c, 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x07, 0x73, 0x0a, 0x92, 0xa4, 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x62, 0xfa, 0x20, 0x16, 0x44, 0x5a, 0x4a, 0x2d, 0xb5, 0x24, 0x39, 0x45, 0x1f, 0x44, 0x14, 0xa7, 0x16, 0x95, 0xa5, 0x16, 0x21, 0x31, 0x0b, 0x92, 0xf4, 0x8b, 0x0a, 0x92, diff --git a/vendor/github.com/coreos/etcd/lease/lessor.go b/vendor/github.com/coreos/etcd/lease/lessor.go index 385bd76d73c7..3418cf565edb ---
a/vendor/github.com/coreos/etcd/lease/lessor.go +++ b/vendor/github.com/coreos/etcd/lease/lessor.go @@ -31,40 +31,39 @@ import ( const ( // NoLease is a special LeaseID representing the absence of a lease. NoLease = LeaseID(0) + + forever = monotime.Time(math.MaxInt64) ) var ( leaseBucketName = []byte("lease") - forever = monotime.Time(math.MaxInt64) + // maximum number of leases to revoke per second; configurable for tests + leaseRevokeRate = 1000 ErrNotPrimary = errors.New("not a primary lessor") ErrLeaseNotFound = errors.New("lease not found") ErrLeaseExists = errors.New("lease already exists") ) -type LeaseID int64 - -// RangeDeleter defines an interface with Txn and DeleteRange method. -// We define this interface only for lessor to limit the number -// of methods of mvcc.KV to what lessor actually needs. -// -// Having a minimum interface makes testing easy. -type RangeDeleter interface { - // TxnBegin see comments on mvcc.KV - TxnBegin() int64 - // TxnEnd see comments on mvcc.KV - TxnEnd(txnID int64) error - // TxnDeleteRange see comments on mvcc.KV - TxnDeleteRange(txnID int64, key, end []byte) (n, rev int64, err error) +// TxnDelete is a TxnWrite that only permits deletes. Defined here +// to avoid circular dependency with mvcc. +type TxnDelete interface { + DeleteRange(key, end []byte) (n, rev int64) + End() } +// RangeDeleter is a TxnDelete constructor. +type RangeDeleter func() TxnDelete + +type LeaseID int64 + // Lessor owns leases. It can grant, revoke, renew and modify leases for lessee. type Lessor interface { - // SetRangeDeleter sets the RangeDeleter to the Lessor. - // Lessor deletes the items in the revoked or expired lease from the - // the set RangeDeleter. - SetRangeDeleter(dr RangeDeleter) + // SetRangeDeleter lets the lessor create TxnDeletes to the store. + // Lessor deletes the items in the revoked or expired lease by creating + // new TxnDeletes. + SetRangeDeleter(rd RangeDeleter) // Grant grants a lease that expires at least after TTL seconds. Grant(id LeaseID, ttl int64) (*Lease, error) @@ -248,17 +247,14 @@ func (le *lessor) Revoke(id LeaseID) error { return nil } - tid := le.rd.TxnBegin() + txn := le.rd() // sort keys so deletes are in same order among all members, // otherwise the backend hashes will be different keys := l.Keys() sort.StringSlice(keys).Sort() for _, key := range keys { - _, _, err := le.rd.TxnDeleteRange(tid, []byte(key), nil) - if err != nil { - panic(err) - } + txn.DeleteRange([]byte(key), nil) } le.mu.Lock() @@ -269,11 +265,7 @@ func (le *lessor) Revoke(id LeaseID) error { // deleting the keys if etcdserver fails in between.
le.b.BatchTx().UnsafeDelete(leaseBucketName, int64ToBytes(int64(l.ID))) - err := le.rd.TxnEnd(tid) - if err != nil { - panic(err) - } - + txn.End() return nil } @@ -335,8 +327,53 @@ func (le *lessor) Promote(extend time.Duration) { for _, l := range le.leaseMap { l.refresh(extend) } + + if len(le.leaseMap) < leaseRevokeRate { + // no possibility of lease pile-up + return + } + + // adjust expiries in case of overlap + leases := make([]*Lease, 0, len(le.leaseMap)) + for _, l := range le.leaseMap { + leases = append(leases, l) + } + sort.Sort(leasesByExpiry(leases)) + + baseWindow := leases[0].Remaining() + nextWindow := baseWindow + time.Second + expires := 0 + // have fewer expires than the total revoke rate so piled up leases + // don't consume the entire revoke limit + targetExpiresPerSecond := (3 * leaseRevokeRate) / 4 + for _, l := range leases { + remaining := l.Remaining() + if remaining > nextWindow { + baseWindow = remaining + nextWindow = baseWindow + time.Second + expires = 1 + continue + } + expires++ + if expires <= targetExpiresPerSecond { + continue + } + rateDelay := float64(time.Second) * (float64(expires) / float64(targetExpiresPerSecond)) + // If leases are extended by n seconds, leases n seconds ahead of the + // base window should be extended by only one second. + rateDelay -= float64(remaining - baseWindow) + delay := time.Duration(rateDelay) + nextWindow = baseWindow + delay + l.refresh(delay + extend) + } } +type leasesByExpiry []*Lease + +func (le leasesByExpiry) Len() int { return len(le) } +func (le leasesByExpiry) Less(i, j int) bool { return le[i].Remaining() < le[j].Remaining() } +func (le leasesByExpiry) Swap(i, j int) { le[i], le[j] = le[j], le[i] } + func (le *lessor) Demote() { le.mu.Lock() defer le.mu.Unlock() @@ -433,6 +470,10 @@ func (le *lessor) runLoop() { le.mu.Unlock() if len(ls) != 0 { + // rate limit + if len(ls) > leaseRevokeRate/2 { + ls = ls[:leaseRevokeRate/2] + } select { case <-le.stopC: return diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/backend.go b/vendor/github.com/coreos/etcd/mvcc/backend/backend.go index e5e0028f94be..87edd25f427c 100644 --- a/vendor/github.com/coreos/etcd/mvcc/backend/backend.go +++ b/vendor/github.com/coreos/etcd/mvcc/backend/backend.go @@ -25,7 +25,7 @@ import ( "sync/atomic" "time" - "github.com/boltdb/bolt" + bolt "github.com/coreos/bbolt" "github.com/coreos/pkg/capnslog" ) @@ -35,25 +35,21 @@ var ( defragLimit = 10000 - // InitialMmapSize is the initial size of the mmapped region. Setting this larger than + // initialMmapSize is the initial size of the mmapped region. Setting this larger than // the potential max db size can prevent writer from blocking reader. // This only works for linux. - InitialMmapSize = int64(10 * 1024 * 1024 * 1024) + initialMmapSize = uint64(10 * 1024 * 1024 * 1024) plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "mvcc/backend") -) -const ( - // DefaultQuotaBytes is the number of bytes the backend Size may - // consume before exceeding the space quota. - DefaultQuotaBytes = int64(2 * 1024 * 1024 * 1024) // 2GB - // MaxQuotaBytes is the maximum number of bytes suggested for a backend - // quota. A larger quota may lead to degraded performance. - MaxQuotaBytes = int64(8 * 1024 * 1024 * 1024) // 8GB + // minSnapshotWarningTimeout is the minimum threshold to trigger a long running snapshot warning. 
+ minSnapshotWarningTimeout = time.Duration(30 * time.Second) ) type Backend interface { + ReadTx() ReadTx BatchTx() BatchTx + Snapshot() Snapshot Hash(ignores map[IgnoreKey]struct{}) (uint32, error) // Size returns the current size of the backend. @@ -86,36 +82,71 @@ type backend struct { batchInterval time.Duration batchLimit int - batchTx *batchTx + batchTx *batchTxBuffered + + readTx *readTx stopc chan struct{} donec chan struct{} } -func New(path string, d time.Duration, limit int) Backend { - return newBackend(path, d, limit) +type BackendConfig struct { + // Path is the file path to the backend file. + Path string + // BatchInterval is the maximum time before flushing the BatchTx. + BatchInterval time.Duration + // BatchLimit is the maximum puts before flushing the BatchTx. + BatchLimit int + // MmapSize is the number of bytes to mmap for the backend. + MmapSize uint64 +} + +func DefaultBackendConfig() BackendConfig { + return BackendConfig{ + BatchInterval: defaultBatchInterval, + BatchLimit: defaultBatchLimit, + MmapSize: initialMmapSize, + } +} + +func New(bcfg BackendConfig) Backend { + return newBackend(bcfg) } func NewDefaultBackend(path string) Backend { - return newBackend(path, defaultBatchInterval, defaultBatchLimit) + bcfg := DefaultBackendConfig() + bcfg.Path = path + return newBackend(bcfg) } -func newBackend(path string, d time.Duration, limit int) *backend { - db, err := bolt.Open(path, 0600, boltOpenOptions) +func newBackend(bcfg BackendConfig) *backend { + bopts := &bolt.Options{} + if boltOpenOptions != nil { + *bopts = *boltOpenOptions + } + bopts.InitialMmapSize = bcfg.mmapSize() + + db, err := bolt.Open(bcfg.Path, 0600, bopts) if err != nil { - plog.Panicf("cannot open database at %s (%v)", path, err) + plog.Panicf("cannot open database at %s (%v)", bcfg.Path, err) } + // In future, may want to make buffering optional for low-concurrency systems + // or dynamically swap between buffered/non-buffered depending on workload. b := &backend{ db: db, - batchInterval: d, - batchLimit: limit, + batchInterval: bcfg.BatchInterval, + batchLimit: bcfg.BatchLimit, + + readTx: &readTx{buf: txReadBuffer{ + txBuffer: txBuffer{make(map[string]*bucketBuffer)}}, + }, stopc: make(chan struct{}), donec: make(chan struct{}), } - b.batchTx = newBatchTx(b) + b.batchTx = newBatchTxBuffered(b) go b.run() return b } @@ -127,6 +158,8 @@ func (b *backend) BatchTx() BatchTx { return b.batchTx } +func (b *backend) ReadTx() ReadTx { return b.readTx } + // ForceCommit forces the current batching tx to commit. func (b *backend) ForceCommit() { b.batchTx.Commit() @@ -141,7 +174,33 @@ func (b *backend) Snapshot() Snapshot { if err != nil { plog.Fatalf("cannot begin tx (%s)", err) } - return &snapshot{tx} + + stopc, donec := make(chan struct{}), make(chan struct{}) + dbBytes := tx.Size() + go func() { + defer close(donec) + // sendRateBytes is based on transferring snapshot data over a 1 gigabit/s connection + // assuming a min tcp throughput of 100MB/s. 
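The backend constructor above moves from positional parameters to a BackendConfig struct with DefaultBackendConfig(), so call sites override only the fields they care about (as the updated NewTmpBackend below does). A minimal sketch of the pattern; the batch interval and limit defaults here are illustrative, only the mmap size comes from the change above:

```go
package main

import (
	"fmt"
	"time"
)

type BackendConfig struct {
	Path          string        // file path to the backend file
	BatchInterval time.Duration // maximum time before flushing the batch tx
	BatchLimit    int           // maximum puts before flushing the batch tx
	MmapSize      uint64        // bytes to mmap for the backend
}

// DefaultBackendConfig fills in sane defaults; callers tweak what they need.
func DefaultBackendConfig() BackendConfig {
	return BackendConfig{
		BatchInterval: 100 * time.Millisecond,   // illustrative default
		BatchLimit:    10000,                    // illustrative default
		MmapSize:      10 * 1024 * 1024 * 1024,  // 10 GiB, per initialMmapSize above
	}
}

// newDefaultConfigFor mirrors how NewDefaultBackend builds its config:
// start from the defaults and override only the path.
func newDefaultConfigFor(path string) BackendConfig {
	bcfg := DefaultBackendConfig()
	bcfg.Path = path
	return bcfg // the real newBackend would open bolt with these options
}

func main() {
	fmt.Printf("%+v\n", newDefaultConfigFor("/tmp/database"))
}
```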
+ var sendRateBytes int64 = 100 * 1024 * 1024 + warningTimeout := time.Duration(int64((float64(dbBytes) / float64(sendRateBytes)) * float64(time.Second))) + if warningTimeout < minSnapshotWarningTimeout { + warningTimeout = minSnapshotWarningTimeout + } + start := time.Now() + ticker := time.NewTicker(warningTimeout) + defer ticker.Stop() + for { + select { + case <-ticker.C: + plog.Warningf("snapshotting is taking more than %v seconds to finish transferring %v MB [started at %v]", time.Since(start).Seconds(), float64(dbBytes)/float64(1024*1024), start) + case <-stopc: + snapshotDurations.Observe(time.Since(start).Seconds()) + return + } + } + }() + + return &snapshot{tx, stopc, donec} } type IgnoreKey struct { @@ -235,7 +294,11 @@ func (b *backend) defrag() error { b.mu.Lock() defer b.mu.Unlock() - b.batchTx.commit(true) + // block concurrent read requests while resetting tx + b.readTx.mu.Lock() + defer b.readTx.mu.Unlock() + + b.batchTx.unsafeCommit(true) b.batchTx.tx = nil tmpdb, err := bolt.Open(b.db.Path()+".tmp", 0600, boltOpenOptions) @@ -276,6 +339,10 @@ func (b *backend) defrag() error { plog.Fatalf("cannot begin tx (%s)", err) } + b.readTx.buf.reset() + b.readTx.tx = b.unsafeBegin(false) + atomic.StoreInt64(&b.size, b.readTx.tx.Size()) + return nil } @@ -331,6 +398,22 @@ func defragdb(odb, tmpdb *bolt.DB, limit int) error { return tmptx.Commit() } +func (b *backend) begin(write bool) *bolt.Tx { + b.mu.RLock() + tx := b.unsafeBegin(write) + b.mu.RUnlock() + atomic.StoreInt64(&b.size, tx.Size()) + return tx +} + +func (b *backend) unsafeBegin(write bool) *bolt.Tx { + tx, err := b.db.Begin(write) + if err != nil { + plog.Fatalf("cannot begin tx (%s)", err) + } + return tx +} + // NewTmpBackend creates a backend implementation for testing. func NewTmpBackend(batchInterval time.Duration, batchLimit int) (*backend, string) { dir, err := ioutil.TempDir(os.TempDir(), "etcd_backend_test") @@ -338,7 +421,9 @@ func NewTmpBackend(batchInterval time.Duration, batchLimit int) (*backend, strin plog.Fatal(err) } tmpPath := filepath.Join(dir, "database") - return newBackend(tmpPath, batchInterval, batchLimit), tmpPath + bcfg := DefaultBackendConfig() + bcfg.Path, bcfg.BatchInterval, bcfg.BatchLimit = tmpPath, batchInterval, batchLimit + return newBackend(bcfg), tmpPath } func NewDefaultTmpBackend() (*backend, string) { @@ -347,6 +432,12 @@ func NewDefaultTmpBackend() (*backend, string) { type snapshot struct { *bolt.Tx + stopc chan struct{} + donec chan struct{} } -func (s *snapshot) Close() error { return s.Tx.Rollback() } +func (s *snapshot) Close() error { + close(s.stopc) + <-s.donec + return s.Tx.Rollback() +} diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/batch_tx.go b/vendor/github.com/coreos/etcd/mvcc/backend/batch_tx.go index 04fea1e9477d..e5fb84740899 100644 --- a/vendor/github.com/coreos/etcd/mvcc/backend/batch_tx.go +++ b/vendor/github.com/coreos/etcd/mvcc/backend/batch_tx.go @@ -16,23 +16,24 @@ package backend import ( "bytes" + "fmt" + "math" "sync" "sync/atomic" "time" - "github.com/boltdb/bolt" + bolt "github.com/coreos/bbolt" ) type BatchTx interface { - Lock() - Unlock() + ReadTx UnsafeCreateBucket(name []byte) UnsafePut(bucketName []byte, key []byte, value []byte) UnsafeSeqPut(bucketName []byte, key []byte, value []byte) - UnsafeRange(bucketName []byte, key, endKey []byte, limit int64) (keys [][]byte, vals [][]byte) UnsafeDelete(bucketName []byte, key []byte) - UnsafeForEach(bucketName []byte, visitor func(k, v []byte) error) error + // Commit commits a previous tx and
begins a new writable one. Commit() + // CommitAndStop commits the previous tx and does not create a new one. CommitAndStop() } @@ -40,13 +41,8 @@ type batchTx struct { sync.Mutex tx *bolt.Tx backend *backend - pending int -} -func newBatchTx(backend *backend) *batchTx { - tx := &batchTx{backend: backend} - tx.Commit() - return tx + pending int } func (t *batchTx) UnsafeCreateBucket(name []byte) { @@ -84,30 +80,37 @@ func (t *batchTx) unsafePut(bucketName []byte, key []byte, value []byte, seq boo } // UnsafeRange must be called holding the lock on the tx. -func (t *batchTx) UnsafeRange(bucketName []byte, key, endKey []byte, limit int64) (keys [][]byte, vs [][]byte) { - bucket := t.tx.Bucket(bucketName) - if bucket == nil { - plog.Fatalf("bucket %s does not exist", bucketName) +func (t *batchTx) UnsafeRange(bucketName, key, endKey []byte, limit int64) ([][]byte, [][]byte) { + k, v, err := unsafeRange(t.tx, bucketName, key, endKey, limit) + if err != nil { + plog.Fatal(err) } + return k, v +} +func unsafeRange(tx *bolt.Tx, bucketName, key, endKey []byte, limit int64) (keys [][]byte, vs [][]byte, err error) { + bucket := tx.Bucket(bucketName) + if bucket == nil { + return nil, nil, fmt.Errorf("bucket %s does not exist", bucketName) + } if len(endKey) == 0 { - if v := bucket.Get(key); v == nil { - return keys, vs - } else { - return append(keys, key), append(vs, v) + if v := bucket.Get(key); v != nil { + return append(keys, key), append(vs, v), nil } + return nil, nil, nil + } + if limit <= 0 { + limit = math.MaxInt64 } - c := bucket.Cursor() for ck, cv := c.Seek(key); ck != nil && bytes.Compare(ck, endKey) < 0; ck, cv = c.Next() { vs = append(vs, cv) keys = append(keys, ck) - if limit > 0 && limit == int64(len(keys)) { + if limit == int64(len(keys)) { break } } - - return keys, vs + return keys, vs, nil } // UnsafeDelete must be called holding the lock on the tx. @@ -125,12 +128,14 @@ func (t *batchTx) UnsafeDelete(bucketName []byte, key []byte) { // UnsafeForEach must be called holding the lock on the tx. func (t *batchTx) UnsafeForEach(bucketName []byte, visitor func(k, v []byte) error) error { - b := t.tx.Bucket(bucketName) - if b == nil { - // bucket does not exist - return nil + return unsafeForEach(t.tx, bucketName, visitor) +} + +func unsafeForEach(tx *bolt.Tx, bucket []byte, visitor func(k, v []byte) error) error { + if b := tx.Bucket(bucket); b != nil { + return b.ForEach(visitor) } - return b.ForEach(visitor) + return nil } // Commit commits a previous tx and begins a new writable one. @@ -140,7 +145,7 @@ func (t *batchTx) Commit() { t.commit(false) } -// CommitAndStop commits the previous tx and do not create a new one. +// CommitAndStop commits the previous tx and does not create a new one. func (t *batchTx) CommitAndStop() { t.Lock() defer t.Unlock() @@ -150,37 +155,28 @@ func (t *batchTx) CommitAndStop() { func (t *batchTx) Unlock() { if t.pending >= t.backend.batchLimit { t.commit(false) - t.pending = 0 } t.Mutex.Unlock() } func (t *batchTx) commit(stop bool) { - var err error // commit the last tx if t.tx != nil { if t.pending == 0 && !stop { t.backend.mu.RLock() defer t.backend.mu.RUnlock() - // batchTx.commit(true) calls *bolt.Tx.Commit, which - // initializes *bolt.Tx.db and *bolt.Tx.meta as nil, - // and subsequent *bolt.Tx.Size() call panics. - // - // This nil pointer reference panic happens when: - // 1. batchTx.commit(false) from newBatchTx - // 2. batchTx.commit(true) from stopping backend - // 3. 
batchTx.commit(false) from inflight mvcc Hash call - // - // Check if db is nil to prevent this panic - if t.tx.DB() != nil { - atomic.StoreInt64(&t.backend.size, t.tx.Size()) - } + // t.tx.DB()==nil if 'CommitAndStop' calls 'batchTx.commit(true)', + // which initializes *bolt.Tx.db and *bolt.Tx.meta as nil; panics t.tx.Size(). + // Server must make sure 'batchTx.commit(false)' does not follow + // 'batchTx.commit(true)' (e.g. stopping backend, and inflight Hash call). + atomic.StoreInt64(&t.backend.size, t.tx.Size()) return } + start := time.Now() // gofail: var beforeCommit struct{} - err = t.tx.Commit() + err := t.tx.Commit() // gofail: var afterCommit struct{} commitDurations.Observe(time.Since(start).Seconds()) atomic.AddInt64(&t.backend.commits, 1) @@ -190,17 +186,81 @@ func (t *batchTx) commit(stop bool) { plog.Fatalf("cannot commit tx (%s)", err) } } + if !stop { + t.tx = t.backend.begin(true) + } +} + +type batchTxBuffered struct { + batchTx + buf txWriteBuffer +} - if stop { - return +func newBatchTxBuffered(backend *backend) *batchTxBuffered { + tx := &batchTxBuffered{ + batchTx: batchTx{backend: backend}, + buf: txWriteBuffer{ + txBuffer: txBuffer{make(map[string]*bucketBuffer)}, + seq: true, + }, } + tx.Commit() + return tx +} - t.backend.mu.RLock() - defer t.backend.mu.RUnlock() - // begin a new tx - t.tx, err = t.backend.db.Begin(true) - if err != nil { - plog.Fatalf("cannot begin tx (%s)", err) +func (t *batchTxBuffered) Unlock() { + if t.pending != 0 { + t.backend.readTx.mu.Lock() + t.buf.writeback(&t.backend.readTx.buf) + t.backend.readTx.mu.Unlock() + if t.pending >= t.backend.batchLimit { + t.commit(false) + } } - atomic.StoreInt64(&t.backend.size, t.tx.Size()) + t.batchTx.Unlock() +} + +func (t *batchTxBuffered) Commit() { + t.Lock() + defer t.Unlock() + t.commit(false) +} + +func (t *batchTxBuffered) CommitAndStop() { + t.Lock() + defer t.Unlock() + t.commit(true) +} + +func (t *batchTxBuffered) commit(stop bool) { + // all read txs must be closed to acquire boltdb commit rwlock + t.backend.readTx.mu.Lock() + defer t.backend.readTx.mu.Unlock() + t.unsafeCommit(stop) +} + +func (t *batchTxBuffered) unsafeCommit(stop bool) { + if t.backend.readTx.tx != nil { + if err := t.backend.readTx.tx.Rollback(); err != nil { + plog.Fatalf("cannot rollback tx (%s)", err) + } + t.backend.readTx.buf.reset() + t.backend.readTx.tx = nil + } + + t.batchTx.commit(stop) + + if !stop { + t.backend.readTx.tx = t.backend.begin(false) + } +} + +func (t *batchTxBuffered) UnsafePut(bucketName []byte, key []byte, value []byte) { + t.batchTx.UnsafePut(bucketName, key, value) + t.buf.put(bucketName, key, value) +} + +func (t *batchTxBuffered) UnsafeSeqPut(bucketName []byte, key []byte, value []byte) { + t.batchTx.UnsafeSeqPut(bucketName, key, value) + t.buf.putSeq(bucketName, key, value) } diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/boltoption_default.go b/vendor/github.com/coreos/etcd/mvcc/backend/boltoption_default.go deleted file mode 100644 index 92019c18415c..000000000000 --- a/vendor/github.com/coreos/etcd/mvcc/backend/boltoption_default.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2016 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build !linux - -package backend - -import "github.com/boltdb/bolt" - -var boltOpenOptions *bolt.Options = nil diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/boltoption_linux.go b/vendor/github.com/coreos/etcd/mvcc/backend/boltoption_linux.go deleted file mode 100644 index 4ee9b05a77cb..000000000000 --- a/vendor/github.com/coreos/etcd/mvcc/backend/boltoption_linux.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package backend - -import ( - "syscall" - - "github.com/boltdb/bolt" -) - -// syscall.MAP_POPULATE on linux 2.6.23+ does sequential read-ahead -// which can speed up entire-database read with boltdb. We want to -// enable MAP_POPULATE for faster key-value store recovery in storage -// package. If your kernel version is lower than 2.6.23 -// (https://github.com/torvalds/linux/releases/tag/v2.6.23), mmap might -// silently ignore this flag. Please update your kernel to prevent this. -var boltOpenOptions = &bolt.Options{ - MmapFlags: syscall.MAP_POPULATE, - InitialMmapSize: int(InitialMmapSize), -} diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/config_default.go b/vendor/github.com/coreos/etcd/mvcc/backend/config_default.go new file mode 100644 index 000000000000..edfed0025c6c --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/backend/config_default.go @@ -0,0 +1,23 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build !linux,!windows + +package backend + +import bolt "github.com/coreos/bbolt" + +var boltOpenOptions *bolt.Options = nil + +func (bcfg *BackendConfig) mmapSize() int { return int(bcfg.MmapSize) } diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/config_linux.go b/vendor/github.com/coreos/etcd/mvcc/backend/config_linux.go new file mode 100644 index 000000000000..a8f6abeba63e --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/backend/config_linux.go @@ -0,0 +1,33 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package backend + +import ( + "syscall" + + bolt "github.com/coreos/bbolt" +) + +// syscall.MAP_POPULATE on linux 2.6.23+ does sequential read-ahead +// which can speed up entire-database read with boltdb. We want to +// enable MAP_POPULATE for faster key-value store recovery in storage +// package. If your kernel version is lower than 2.6.23 +// (https://github.com/torvalds/linux/releases/tag/v2.6.23), mmap might +// silently ignore this flag. Please update your kernel to prevent this. +var boltOpenOptions = &bolt.Options{ + MmapFlags: syscall.MAP_POPULATE, +} + +func (bcfg *BackendConfig) mmapSize() int { return int(bcfg.MmapSize) } diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/config_windows.go b/vendor/github.com/coreos/etcd/mvcc/backend/config_windows.go new file mode 100644 index 000000000000..71d02700bcdc --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/backend/config_windows.go @@ -0,0 +1,26 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build windows + +package backend + +import bolt "github.com/coreos/bbolt" + +var boltOpenOptions *bolt.Options = nil + +// setting mmap size != 0 on windows will allocate the entire +// mmap size for the file, instead of growing it. So, force 0. 
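With the constructor change above, callers build a BackendConfig (or start from DefaultBackendConfig) instead of passing path, interval, and limit positionally. A hedged sketch of opening a backend with a couple of overrides; the path is a placeholder, and Close() is assumed from the elided portion of the Backend interface:

```go
package main

import (
	"time"

	"github.com/coreos/etcd/mvcc/backend"
)

func main() {
	bcfg := backend.DefaultBackendConfig()
	bcfg.Path = "/tmp/example.db" // placeholder path
	bcfg.BatchInterval = 100 * time.Millisecond
	bcfg.MmapSize = 1 << 30 // interpreted by the per-platform mmapSize(); forced to 0 on Windows
	b := backend.New(bcfg)
	defer b.Close() // Close is in the elided part of the Backend interface
	b.ForceCommit()
}
```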
+ +func (bcfg *BackendConfig) mmapSize() int { return 0 } diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/metrics.go b/vendor/github.com/coreos/etcd/mvcc/backend/metrics.go index 34a56a91956f..30a388014766 100644 --- a/vendor/github.com/coreos/etcd/mvcc/backend/metrics.go +++ b/vendor/github.com/coreos/etcd/mvcc/backend/metrics.go @@ -24,8 +24,18 @@ var ( Help: "The latency distributions of commit called by backend.", Buckets: prometheus.ExponentialBuckets(0.001, 2, 14), }) + + snapshotDurations = prometheus.NewHistogram(prometheus.HistogramOpts{ + Namespace: "etcd", + Subsystem: "disk", + Name: "backend_snapshot_duration_seconds", + Help: "The latency distribution of backend snapshots.", + // 10 ms -> 655 seconds + Buckets: prometheus.ExponentialBuckets(.01, 2, 17), + }) ) func init() { prometheus.MustRegister(commitDurations) + prometheus.MustRegister(snapshotDurations) } diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/read_tx.go b/vendor/github.com/coreos/etcd/mvcc/backend/read_tx.go new file mode 100644 index 000000000000..9fc6b7906206 --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/backend/read_tx.go @@ -0,0 +1,92 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package backend + +import ( + "bytes" + "math" + "sync" + + bolt "github.com/coreos/bbolt" +) + +// safeRangeBucket is a hack to avoid inadvertently reading duplicate keys; +// overwrites on a bucket should only fetch with limit=1, but safeRangeBucket +// is known to never overwrite any key so range is safe. +var safeRangeBucket = []byte("key") + +type ReadTx interface { + Lock() + Unlock() + + UnsafeRange(bucketName []byte, key, endKey []byte, limit int64) (keys [][]byte, vals [][]byte) + UnsafeForEach(bucketName []byte, visitor func(k, v []byte) error) error +} + +type readTx struct { + // mu protects accesses to the txReadBuffer + mu sync.RWMutex + buf txReadBuffer + + // txmu protects accesses to the Tx on Range requests + txmu sync.Mutex + tx *bolt.Tx +} + +func (rt *readTx) Lock() { rt.mu.RLock() } +func (rt *readTx) Unlock() { rt.mu.RUnlock() } + +func (rt *readTx) UnsafeRange(bucketName, key, endKey []byte, limit int64) ([][]byte, [][]byte) { + if endKey == nil { + // forbid duplicates for single keys + limit = 1 + } + if limit <= 0 { + limit = math.MaxInt64 + } + if limit > 1 && !bytes.Equal(bucketName, safeRangeBucket) { + panic("do not use unsafeRange on non-keys bucket") + } + keys, vals := rt.buf.Range(bucketName, key, endKey, limit) + if int64(len(keys)) == limit { + return keys, vals + } + rt.txmu.Lock() + // ignore error since bucket may have been created in this batch + k2, v2, _ := unsafeRange(rt.tx, bucketName, key, endKey, limit-int64(len(keys))) + rt.txmu.Unlock() + return append(k2, keys...), append(v2, vals...) 
+} + +func (rt *readTx) UnsafeForEach(bucketName []byte, visitor func(k, v []byte) error) error { + dups := make(map[string]struct{}) + f1 := func(k, v []byte) error { + dups[string(k)] = struct{}{} + return visitor(k, v) + } + f2 := func(k, v []byte) error { + if _, ok := dups[string(k)]; ok { + return nil + } + return visitor(k, v) + } + if err := rt.buf.ForEach(bucketName, f1); err != nil { + return err + } + rt.txmu.Lock() + err := unsafeForEach(rt.tx, bucketName, f2) + rt.txmu.Unlock() + return err +} diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/tx_buffer.go b/vendor/github.com/coreos/etcd/mvcc/backend/tx_buffer.go new file mode 100644 index 000000000000..56e885dbfbc3 --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/backend/tx_buffer.go @@ -0,0 +1,181 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package backend + +import ( + "bytes" + "sort" +) + +// txBuffer handles functionality shared between txWriteBuffer and txReadBuffer. +type txBuffer struct { + buckets map[string]*bucketBuffer +} + +func (txb *txBuffer) reset() { + for k, v := range txb.buckets { + if v.used == 0 { + // demote + delete(txb.buckets, k) + } + v.used = 0 + } +} + +// txWriteBuffer buffers writes of pending updates that have not yet committed. +type txWriteBuffer struct { + txBuffer + seq bool +} + +func (txw *txWriteBuffer) put(bucket, k, v []byte) { + txw.seq = false + txw.putSeq(bucket, k, v) +} + +func (txw *txWriteBuffer) putSeq(bucket, k, v []byte) { + b, ok := txw.buckets[string(bucket)] + if !ok { + b = newBucketBuffer() + txw.buckets[string(bucket)] = b + } + b.add(k, v) +} + +func (txw *txWriteBuffer) writeback(txr *txReadBuffer) { + for k, wb := range txw.buckets { + rb, ok := txr.buckets[k] + if !ok { + delete(txw.buckets, k) + txr.buckets[k] = wb + continue + } + if !txw.seq && wb.used > 1 { + // assume no duplicate keys + sort.Sort(wb) + } + rb.merge(wb) + } + txw.reset() +} + +// txReadBuffer accesses buffered updates. +type txReadBuffer struct{ txBuffer } + +func (txr *txReadBuffer) Range(bucketName, key, endKey []byte, limit int64) ([][]byte, [][]byte) { + if b := txr.buckets[string(bucketName)]; b != nil { + return b.Range(key, endKey, limit) + } + return nil, nil +} + +func (txr *txReadBuffer) ForEach(bucketName []byte, visitor func(k, v []byte) error) error { + if b := txr.buckets[string(bucketName)]; b != nil { + return b.ForEach(visitor) + } + return nil +} + +type kv struct { + key []byte + val []byte +} + +// bucketBuffer buffers key-value pairs that are pending commit. +type bucketBuffer struct { + buf []kv + // used tracks number of elements in use so buf can be reused without reallocation. 
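bucketBuffer.merge below appends the source buffer's entries, then resolves duplicate keys with a stable sort so only the newest update for each key survives. That dedup pass in isolation, with kv reduced to strings:

```go
package main

import (
	"fmt"
	"sort"
)

type kv struct{ key, val string }

// dedupNewest mirrors the compaction at the end of merge: after a stable
// sort by key, later (newer) duplicates overwrite earlier ones in place.
func dedupNewest(buf []kv) []kv {
	if len(buf) == 0 {
		return buf
	}
	sort.SliceStable(buf, func(i, j int) bool { return buf[i].key < buf[j].key })
	widx := 0
	for ridx := 1; ridx < len(buf); ridx++ {
		if buf[ridx].key != buf[widx].key {
			widx++
		}
		buf[widx] = buf[ridx]
	}
	return buf[:widx+1]
}

func main() {
	buf := []kv{{"a", "1"}, {"b", "1"}, {"a", "2"}} // "a" written twice
	fmt.Println(dedupNewest(buf))                   // [{a 2} {b 1}]
}
```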
+ used int +} + +func newBucketBuffer() *bucketBuffer { + return &bucketBuffer{buf: make([]kv, 512), used: 0} +} + +func (bb *bucketBuffer) Range(key, endKey []byte, limit int64) (keys [][]byte, vals [][]byte) { + f := func(i int) bool { return bytes.Compare(bb.buf[i].key, key) >= 0 } + idx := sort.Search(bb.used, f) + if idx < 0 { + return nil, nil + } + if len(endKey) == 0 { + if bytes.Equal(key, bb.buf[idx].key) { + keys = append(keys, bb.buf[idx].key) + vals = append(vals, bb.buf[idx].val) + } + return keys, vals + } + if bytes.Compare(endKey, bb.buf[idx].key) <= 0 { + return nil, nil + } + for i := idx; i < bb.used && int64(len(keys)) < limit; i++ { + if bytes.Compare(endKey, bb.buf[i].key) <= 0 { + break + } + keys = append(keys, bb.buf[i].key) + vals = append(vals, bb.buf[i].val) + } + return keys, vals +} + +func (bb *bucketBuffer) ForEach(visitor func(k, v []byte) error) error { + for i := 0; i < bb.used; i++ { + if err := visitor(bb.buf[i].key, bb.buf[i].val); err != nil { + return err + } + } + return nil +} + +func (bb *bucketBuffer) add(k, v []byte) { + bb.buf[bb.used].key, bb.buf[bb.used].val = k, v + bb.used++ + if bb.used == len(bb.buf) { + buf := make([]kv, (3*len(bb.buf))/2) + copy(buf, bb.buf) + bb.buf = buf + } +} + +// merge merges data from bbsrc into bb. +func (bb *bucketBuffer) merge(bbsrc *bucketBuffer) { + for i := 0; i < bbsrc.used; i++ { + bb.add(bbsrc.buf[i].key, bbsrc.buf[i].val) + } + if bb.used == bbsrc.used { + return + } + if bytes.Compare(bb.buf[(bb.used-bbsrc.used)-1].key, bbsrc.buf[0].key) < 0 { + return + } + + sort.Stable(bb) + + // remove duplicates, using only newest update + widx := 0 + for ridx := 1; ridx < bb.used; ridx++ { + if !bytes.Equal(bb.buf[ridx].key, bb.buf[widx].key) { + widx++ + } + bb.buf[widx] = bb.buf[ridx] + } + bb.used = widx + 1 +} + +func (bb *bucketBuffer) Len() int { return bb.used } +func (bb *bucketBuffer) Less(i, j int) bool { + return bytes.Compare(bb.buf[i].key, bb.buf[j].key) < 0 +} +func (bb *bucketBuffer) Swap(i, j int) { bb.buf[i], bb.buf[j] = bb.buf[j], bb.buf[i] } diff --git a/vendor/github.com/coreos/etcd/mvcc/index.go b/vendor/github.com/coreos/etcd/mvcc/index.go index 397098a7ba75..991289cdd5c4 100644 --- a/vendor/github.com/coreos/etcd/mvcc/index.go +++ b/vendor/github.com/coreos/etcd/mvcc/index.go @@ -29,7 +29,9 @@ type index interface { RangeSince(key, end []byte, rev int64) []revision Compact(rev int64) map[revision]struct{} Equal(b index) bool + Insert(ki *keyIndex) + KeyIndex(ki *keyIndex) *keyIndex } type treeIndex struct { @@ -60,18 +62,27 @@ func (ti *treeIndex) Put(key []byte, rev revision) { func (ti *treeIndex) Get(key []byte, atRev int64) (modified, created revision, ver int64, err error) { keyi := &keyIndex{key: key} - ti.RLock() defer ti.RUnlock() - item := ti.tree.Get(keyi) - if item == nil { + if keyi = ti.keyIndex(keyi); keyi == nil { return revision{}, revision{}, 0, ErrRevisionNotFound } - - keyi = item.(*keyIndex) return keyi.get(atRev) } +func (ti *treeIndex) KeyIndex(keyi *keyIndex) *keyIndex { + ti.RLock() + defer ti.RUnlock() + return ti.keyIndex(keyi) +} + +func (ti *treeIndex) keyIndex(keyi *keyIndex) *keyIndex { + if item := ti.tree.Get(keyi); item != nil { + return item.(*keyIndex) + } + return nil +} + func (ti *treeIndex) Range(key, end []byte, atRev int64) (keys [][]byte, revs []revision) { if end == nil { rev, _, _, err := ti.Get(key, atRev) diff --git a/vendor/github.com/coreos/etcd/mvcc/key_index.go b/vendor/github.com/coreos/etcd/mvcc/key_index.go index
983c64e2f6bf..9104f9b2d36a 100644 --- a/vendor/github.com/coreos/etcd/mvcc/key_index.go +++ b/vendor/github.com/coreos/etcd/mvcc/key_index.go @@ -222,7 +222,6 @@ func (ki *keyIndex) compact(atRev int64, available map[revision]struct{}) { } // remove the previous generations. ki.generations = ki.generations[i:] - return } func (ki *keyIndex) isEmpty() bool { diff --git a/vendor/github.com/coreos/etcd/mvcc/kv.go b/vendor/github.com/coreos/etcd/mvcc/kv.go index c851c8725e88..6636347aa431 100644 --- a/vendor/github.com/coreos/etcd/mvcc/kv.go +++ b/vendor/github.com/coreos/etcd/mvcc/kv.go @@ -32,15 +32,15 @@ type RangeResult struct { Count int } -type KV interface { - // Rev returns the current revision of the KV. - Rev() int64 - - // FirstRev returns the first revision of the KV. +type ReadView interface { + // FirstRev returns the first KV revision at the time of opening the txn. // After a compaction, the first revision increases to the compaction // revision. FirstRev() int64 + // Rev returns the revision of the KV at the time of opening the txn. + Rev() int64 + // Range gets the keys in the range at rangeRev. // The returned rev is the current revision of the KV when the operation is executed. // If rangeRev <=0, range gets the keys at currentRev. @@ -50,14 +50,17 @@ type KV interface { // Limit limits the number of keys returned. // If the required rev is compacted, ErrCompacted will be returned. Range(key, end []byte, ro RangeOptions) (r *RangeResult, err error) +} - // Put puts the given key, value into the store. Put also takes additional argument lease to - // attach a lease to a key-value pair as meta-data. KV implementation does not validate the lease - // id. - // A put also increases the rev of the store, and generates one event in the event history. - // The returned rev is the current revision of the KV when the operation is executed. - Put(key, value []byte, lease lease.LeaseID) (rev int64) +// TxnRead represents a read-only transaction with operations that will not +// block other read transactions. +type TxnRead interface { + ReadView + // End marks the transaction is complete and ready to commit. + End() +} +type WriteView interface { // DeleteRange deletes the given range from the store. // A deleteRange increases the rev of the store if any key in the range exists. // The number of key deleted will be returned. @@ -67,26 +70,51 @@ type KV interface { // if the `end` is not nil, deleteRange deletes the keys in range [key, range_end). DeleteRange(key, end []byte) (n, rev int64) - // TxnBegin begins a txn. Only Txn prefixed operation can be executed, others will be blocked - // until txn ends. Only one on-going txn is allowed. - // TxnBegin returns an int64 txn ID. - // All txn prefixed operations with same txn ID will be done with the same rev. - TxnBegin() int64 - // TxnEnd ends the on-going txn with txn ID. If the on-going txn ID is not matched, error is returned. - TxnEnd(txnID int64) error - // TxnRange returns the current revision of the KV when the operation is executed. - TxnRange(txnID int64, key, end []byte, ro RangeOptions) (r *RangeResult, err error) - TxnPut(txnID int64, key, value []byte, lease lease.LeaseID) (rev int64, err error) - TxnDeleteRange(txnID int64, key, end []byte) (n, rev int64, err error) + // Put puts the given key, value into the store. Put also takes additional argument lease to + // attach a lease to a key-value pair as meta-data. KV implementation does not validate the lease + // id. 
+ // A put also increases the rev of the store, and generates one event in the event history. + // The returned rev is the current revision of the KV when the operation is executed. + Put(key, value []byte, lease lease.LeaseID) (rev int64) +} + +// TxnWrite represents a transaction that can modify the store. +type TxnWrite interface { + TxnRead + WriteView + // Changes gets the changes made since opening the write txn. + Changes() []mvccpb.KeyValue +} - // Compact frees all superseded keys with revisions less than rev. - Compact(rev int64) (<-chan struct{}, error) +// txnReadWrite coerces a read txn to a write, panicking on any write operation. +type txnReadWrite struct{ TxnRead } + +func (trw *txnReadWrite) DeleteRange(key, end []byte) (n, rev int64) { panic("unexpected DeleteRange") } +func (trw *txnReadWrite) Put(key, value []byte, lease lease.LeaseID) (rev int64) { + panic("unexpected Put") +} +func (trw *txnReadWrite) Changes() []mvccpb.KeyValue { return nil } + +func NewReadOnlyTxnWrite(txn TxnRead) TxnWrite { return &txnReadWrite{txn} } + +type KV interface { + ReadView + WriteView + + // Read creates a read transaction. + Read() TxnRead + + // Write creates a write transaction. + Write() TxnWrite // Hash retrieves the hash of KV state and revision. - // This method is designed for consistency checking purpose. + // This method is designed for consistency checking purposes. Hash() (hash uint32, revision int64, err error) - // Commit commits txns into the underlying backend. + // Compact frees all superseded keys with revisions less than rev. + Compact(rev int64) (<-chan struct{}, error) + + // Commit commits outstanding txns into the underlying backend. Commit() // Restore restores the KV store from a backend. diff --git a/vendor/github.com/coreos/etcd/mvcc/kv_view.go b/vendor/github.com/coreos/etcd/mvcc/kv_view.go new file mode 100644 index 000000000000..f40ba8edc22b --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/kv_view.go @@ -0,0 +1,53 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
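The refactor above replaces txn IDs with explicit transaction handles: ReadView and WriteView methods open a short-lived txn per call, while callers that need several operations at one revision hold a TxnRead or TxnWrite open until End. A minimal usage sketch against these interfaces; the kv value and its backend/lessor wiring are assumed, not shown:

```go
package main

import (
	"fmt"

	"github.com/coreos/etcd/lease"
	"github.com/coreos/etcd/mvcc"
)

// readThenWrite sketches the new transaction surface for any mvcc.KV
// (for example, the store returned by mvcc.NewStore).
func readThenWrite(kv mvcc.KV) {
	// Read-only txn: the revision is pinned at open time and the txn
	// does not block other readers.
	tr := kv.Read()
	r, err := tr.Range([]byte("foo"), nil, mvcc.RangeOptions{Limit: 1})
	tr.End()
	if err == nil {
		fmt.Printf("%d kvs at rev %d\n", len(r.KVs), r.Rev)
	}

	// Write txn: buffered puts become visible to readers once End
	// writes back and unlocks.
	tw := kv.Write()
	rev := tw.Put([]byte("foo"), []byte("bar"), lease.NoLease)
	tw.End()
	fmt.Println("put at rev", rev)
}

func main() {} // constructing a real backend and lessor is elided here
```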
+ +package mvcc + +import ( + "github.com/coreos/etcd/lease" +) + +type readView struct{ kv KV } + +func (rv *readView) FirstRev() int64 { + tr := rv.kv.Read() + defer tr.End() + return tr.FirstRev() +} + +func (rv *readView) Rev() int64 { + tr := rv.kv.Read() + defer tr.End() + return tr.Rev() +} + +func (rv *readView) Range(key, end []byte, ro RangeOptions) (r *RangeResult, err error) { + tr := rv.kv.Read() + defer tr.End() + return tr.Range(key, end, ro) +} + +type writeView struct{ kv KV } + +func (wv *writeView) DeleteRange(key, end []byte) (n, rev int64) { + tw := wv.kv.Write() + defer tw.End() + return tw.DeleteRange(key, end) +} + +func (wv *writeView) Put(key, value []byte, lease lease.LeaseID) (rev int64) { + tw := wv.kv.Write() + defer tw.End() + return tw.Put(key, value, lease) +} diff --git a/vendor/github.com/coreos/etcd/mvcc/kvstore.go b/vendor/github.com/coreos/etcd/mvcc/kvstore.go index 28a18a06597a..28a508ccb959 100644 --- a/vendor/github.com/coreos/etcd/mvcc/kvstore.go +++ b/vendor/github.com/coreos/etcd/mvcc/kvstore.go @@ -18,7 +18,6 @@ import ( "encoding/binary" "errors" "math" - "math/rand" "sync" "time" @@ -34,25 +33,29 @@ var ( keyBucketName = []byte("key") metaBucketName = []byte("meta") - // markedRevBytesLen is the byte length of marked revision. - // The first `revBytesLen` bytes represents a normal revision. The last - // one byte is the mark. - markedRevBytesLen = revBytesLen + 1 - markBytePosition = markedRevBytesLen - 1 - markTombstone byte = 't' - consistentIndexKeyName = []byte("consistent_index") scheduledCompactKeyName = []byte("scheduledCompactRev") finishedCompactKeyName = []byte("finishedCompactRev") - ErrTxnIDMismatch = errors.New("mvcc: txn id mismatch") - ErrCompacted = errors.New("mvcc: required revision has been compacted") - ErrFutureRev = errors.New("mvcc: required revision is a future revision") - ErrCanceled = errors.New("mvcc: watcher is canceled") + ErrCompacted = errors.New("mvcc: required revision has been compacted") + ErrFutureRev = errors.New("mvcc: required revision is a future revision") + ErrCanceled = errors.New("mvcc: watcher is canceled") + ErrClosed = errors.New("mvcc: closed") plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "mvcc") ) +const ( + // markedRevBytesLen is the byte length of marked revision. + // The first `revBytesLen` bytes represents a normal revision. The last + // one byte is the mark. + markedRevBytesLen = revBytesLen + 1 + markBytePosition = markedRevBytesLen - 1 + markTombstone byte = 't' +) + +var restoreChunkKeys = 10000 // non-const for testing + // ConsistentIndexGetter is an interface that wraps the Get method. // Consistent index is the offset of an entry in a consistent replicated log. type ConsistentIndexGetter interface { @@ -61,7 +64,11 @@ type ConsistentIndexGetter interface { } type store struct { - mu sync.Mutex // guards the following + ReadView + WriteView + + // mu read locks for txns and write locks for non-txn store changes. + mu sync.RWMutex ig ConsistentIndexGetter @@ -70,19 +77,19 @@ type store struct { le lease.Lessor - currentRev revision - // the main revision of the last compaction + // revMuLock protects currentRev and compactMainRev. + // Locked at end of write txn and released after write txn unlock lock. + // Locked before locking read txn and released after locking. + revMu sync.RWMutex + // currentRev is the revision of the last completed transaction. + currentRev int64 + // compactMainRev is the main revision of the last compaction. 
compactMainRev int64 - tx backend.BatchTx - txnID int64 // tracks the current txnID to verify txn operations - txnModify bool - // bytesBuf8 is a byte slice of length 8 // to avoid a repetitive allocation in saveIndex. bytesBuf8 []byte - changes []mvccpb.KeyValue fifoSched schedule.Scheduler stopc chan struct{} @@ -98,17 +105,18 @@ func NewStore(b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter) *sto le: le, - currentRev: revision{main: 1}, + currentRev: 1, compactMainRev: -1, - bytesBuf8: make([]byte, 8, 8), + bytesBuf8: make([]byte, 8), fifoSched: schedule.NewFIFOScheduler(), stopc: make(chan struct{}), } - + s.ReadView = &readView{s} + s.WriteView = &writeView{s} if s.le != nil { - s.le.SetRangeDeleter(s) + s.le.SetRangeDeleter(func() lease.TxnDelete { return s.Write() }) } tx := s.b.BatchTx() @@ -126,140 +134,6 @@ func NewStore(b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter) *sto return s } -func (s *store) Rev() int64 { - s.mu.Lock() - defer s.mu.Unlock() - - return s.currentRev.main -} - -func (s *store) FirstRev() int64 { - s.mu.Lock() - defer s.mu.Unlock() - - return s.compactMainRev -} - -func (s *store) Put(key, value []byte, lease lease.LeaseID) int64 { - id := s.TxnBegin() - s.put(key, value, lease) - s.txnEnd(id) - - putCounter.Inc() - - return int64(s.currentRev.main) -} - -func (s *store) Range(key, end []byte, ro RangeOptions) (r *RangeResult, err error) { - id := s.TxnBegin() - kvs, count, rev, err := s.rangeKeys(key, end, ro.Limit, ro.Rev, ro.Count) - s.txnEnd(id) - - rangeCounter.Inc() - - r = &RangeResult{ - KVs: kvs, - Count: count, - Rev: rev, - } - - return r, err -} - -func (s *store) DeleteRange(key, end []byte) (n, rev int64) { - id := s.TxnBegin() - n = s.deleteRange(key, end) - s.txnEnd(id) - - deleteCounter.Inc() - - return n, int64(s.currentRev.main) -} - -func (s *store) TxnBegin() int64 { - s.mu.Lock() - s.currentRev.sub = 0 - s.tx = s.b.BatchTx() - s.tx.Lock() - - s.txnID = rand.Int63() - return s.txnID -} - -func (s *store) TxnEnd(txnID int64) error { - err := s.txnEnd(txnID) - if err != nil { - return err - } - - txnCounter.Inc() - return nil -} - -// txnEnd is used for unlocking an internal txn. It does -// not increase the txnCounter. -func (s *store) txnEnd(txnID int64) error { - if txnID != s.txnID { - return ErrTxnIDMismatch - } - - // only update index if the txn modifies the mvcc state. - // read only txn might execute with one write txn concurrently, - // it should not write its index to mvcc. 
- if s.txnModify { - s.saveIndex() - } - s.txnModify = false - - s.tx.Unlock() - if s.currentRev.sub != 0 { - s.currentRev.main += 1 - } - s.currentRev.sub = 0 - - dbTotalSize.Set(float64(s.b.Size())) - s.mu.Unlock() - return nil -} - -func (s *store) TxnRange(txnID int64, key, end []byte, ro RangeOptions) (r *RangeResult, err error) { - if txnID != s.txnID { - return nil, ErrTxnIDMismatch - } - - kvs, count, rev, err := s.rangeKeys(key, end, ro.Limit, ro.Rev, ro.Count) - - r = &RangeResult{ - KVs: kvs, - Count: count, - Rev: rev, - } - return r, err -} - -func (s *store) TxnPut(txnID int64, key, value []byte, lease lease.LeaseID) (rev int64, err error) { - if txnID != s.txnID { - return 0, ErrTxnIDMismatch - } - - s.put(key, value, lease) - return int64(s.currentRev.main + 1), nil -} - -func (s *store) TxnDeleteRange(txnID int64, key, end []byte) (n, rev int64, err error) { - if txnID != s.txnID { - return 0, 0, ErrTxnIDMismatch - } - - n = s.deleteRange(key, end) - if n != 0 || s.currentRev.sub != 0 { - rev = int64(s.currentRev.main + 1) - } else { - rev = int64(s.currentRev.main) - } - return n, rev, nil -} - func (s *store) compactBarrier(ctx context.Context, ch chan struct{}) { if ctx == nil || ctx.Err() != nil { s.mu.Lock() @@ -275,16 +149,25 @@ func (s *store) compactBarrier(ctx context.Context, ch chan struct{}) { close(ch) } +func (s *store) Hash() (hash uint32, revision int64, err error) { + s.b.ForceCommit() + h, err := s.b.Hash(DefaultIgnores) + return h, s.currentRev, err +} + func (s *store) Compact(rev int64) (<-chan struct{}, error) { s.mu.Lock() defer s.mu.Unlock() + s.revMu.Lock() + defer s.revMu.Unlock() + if rev <= s.compactMainRev { ch := make(chan struct{}) f := func(ctx context.Context) { s.compactBarrier(ctx, ch) } s.fifoSched.Schedule(f) return ch, ErrCompacted } - if rev > s.currentRev.main { + if rev > s.currentRev { return nil, ErrFutureRev } @@ -333,24 +216,14 @@ func init() { } } -func (s *store) Hash() (uint32, int64, error) { - s.mu.Lock() - defer s.mu.Unlock() - s.b.ForceCommit() - - h, err := s.b.Hash(DefaultIgnores) - rev := s.currentRev.main - return h, rev, err -} - func (s *store) Commit() { s.mu.Lock() defer s.mu.Unlock() - s.tx = s.b.BatchTx() - s.tx.Lock() - s.saveIndex() - s.tx.Unlock() + tx := s.b.BatchTx() + tx.Lock() + s.saveIndex(tx) + tx.Unlock() s.b.ForceCommit() } @@ -363,10 +236,8 @@ func (s *store) Restore(b backend.Backend) error { s.b = b s.kvindex = newTreeIndex() - s.currentRev = revision{main: 1} + s.currentRev = 1 s.compactMainRev = -1 - s.tx = b.BatchTx() - s.txnID = -1 s.fifoSched = schedule.NewFIFOScheduler() s.stopc = make(chan struct{}) @@ -374,75 +245,63 @@ func (s *store) Restore(b backend.Backend) error { } func (s *store) restore() error { + reportDbTotalSizeInBytesMu.Lock() + b := s.b + reportDbTotalSizeInBytes = func() float64 { return float64(b.Size()) } + reportDbTotalSizeInBytesMu.Unlock() + min, max := newRevBytes(), newRevBytes() revToBytes(revision{main: 1}, min) revToBytes(revision{main: math.MaxInt64, sub: math.MaxInt64}, max) keyToLease := make(map[string]lease.LeaseID) - // use an unordered map to hold the temp index data to speed up - // the initial key index recovery. - // we will convert this unordered map into the tree index later. 
- unordered := make(map[string]*keyIndex, 100000) - // restore index tx := s.b.BatchTx() tx.Lock() + _, finishedCompactBytes := tx.UnsafeRange(metaBucketName, finishedCompactKeyName, nil, 0) if len(finishedCompactBytes) != 0 { s.compactMainRev = bytesToRev(finishedCompactBytes[0]).main plog.Printf("restore compact to %d", s.compactMainRev) } + _, scheduledCompactBytes := tx.UnsafeRange(metaBucketName, scheduledCompactKeyName, nil, 0) + scheduledCompact := int64(0) + if len(scheduledCompactBytes) != 0 { + scheduledCompact = bytesToRev(scheduledCompactBytes[0]).main + } - // TODO: limit N to reduce max memory usage - keys, vals := tx.UnsafeRange(keyBucketName, min, max, 0) - for i, key := range keys { - var kv mvccpb.KeyValue - if err := kv.Unmarshal(vals[i]); err != nil { - plog.Fatalf("cannot unmarshal event: %v", err) + // index keys concurrently as they're loaded in from tx + keysGauge.Set(0) + rkvc, revc := restoreIntoIndex(s.kvindex) + for { + keys, vals := tx.UnsafeRange(keyBucketName, min, max, int64(restoreChunkKeys)) + if len(keys) == 0 { + break } - - rev := bytesToRev(key[:revBytesLen]) - - // restore index - switch { - case isTombstone(key): - if ki, ok := unordered[string(kv.Key)]; ok { - ki.tombstone(rev.main, rev.sub) - } - delete(keyToLease, string(kv.Key)) - - default: - ki, ok := unordered[string(kv.Key)] - if ok { - ki.put(rev.main, rev.sub) - } else { - ki = &keyIndex{key: kv.Key} - ki.restore(revision{kv.CreateRevision, 0}, rev, kv.Version) - unordered[string(kv.Key)] = ki - } - - if lid := lease.LeaseID(kv.Lease); lid != lease.NoLease { - keyToLease[string(kv.Key)] = lid - } else { - delete(keyToLease, string(kv.Key)) - } + // rkvc blocks if the total pending keys exceeds the restore + // chunk size to keep keys from consuming too much memory. + restoreChunk(rkvc, keys, vals, keyToLease) + if len(keys) < restoreChunkKeys { + // partial set implies final set + break } - - // update revision - s.currentRev = rev - } - - // restore the tree index from the unordered index. - for _, v := range unordered { - s.kvindex.Insert(v) + // next set begins after where this one ended + newMin := bytesToRev(keys[len(keys)-1][:revBytesLen]) + newMin.sub++ + revToBytes(newMin, min) } + close(rkvc) + s.currentRev = <-revc // keys in the range [compacted revision -N, compaction] might all be deleted due to compaction. // the correct revision should be set to compaction revision in the case, not the largest revision // we have seen. 
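The rewritten restore above pulls keys out of the backend restoreChunkKeys at a time, resuming each UnsafeRange strictly past the last revision of the previous chunk and stopping when a partial chunk signals the final set. The paging pattern in isolation, with plain ints standing in for encoded revisions:

```go
package main

import "fmt"

func main() {
	// sorted "bucket" of revisions; chunk plays the role of restoreChunkKeys
	bucket := []int{1, 2, 3, 5, 8, 13, 21, 34}
	const chunk = 3
	min := bucket[0]
	for {
		keys := rangeLimited(bucket, min, chunk)
		if len(keys) == 0 {
			break
		}
		fmt.Println("chunk:", keys)
		if len(keys) < chunk {
			break // partial set implies final set
		}
		min = keys[len(keys)-1] + 1 // next set begins after where this one ended
	}
}

// rangeLimited mimics a limited UnsafeRange over a sorted key space.
func rangeLimited(sorted []int, min, limit int) []int {
	var out []int
	for _, v := range sorted {
		if v >= min {
			out = append(out, v)
			if len(out) == limit {
				break
			}
		}
	}
	return out
}
```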
- if s.currentRev.main < s.compactMainRev { - s.currentRev.main = s.compactMainRev + if s.currentRev < s.compactMainRev { + s.currentRev = s.compactMainRev + } + if scheduledCompact <= s.compactMainRev { + scheduledCompact = 0 } for key, lid := range keyToLease { @@ -455,15 +314,6 @@ func (s *store) restore() error { } } - _, scheduledCompactBytes := tx.UnsafeRange(metaBucketName, scheduledCompactKeyName, nil, 0) - scheduledCompact := int64(0) - if len(scheduledCompactBytes) != 0 { - scheduledCompact = bytesToRev(scheduledCompactBytes[0]).main - if scheduledCompact <= s.compactMainRev { - scheduledCompact = 0 - } - } - tx.Unlock() if scheduledCompact != 0 { @@ -474,6 +324,75 @@ func (s *store) restore() error { return nil } +type revKeyValue struct { + key []byte + kv mvccpb.KeyValue + kstr string +} + +func restoreIntoIndex(idx index) (chan<- revKeyValue, <-chan int64) { + rkvc, revc := make(chan revKeyValue, restoreChunkKeys), make(chan int64, 1) + go func() { + currentRev := int64(1) + defer func() { revc <- currentRev }() + // restore the tree index from streaming the unordered index. + kiCache := make(map[string]*keyIndex, restoreChunkKeys) + for rkv := range rkvc { + ki, ok := kiCache[rkv.kstr] + // purge kiCache if many keys but still missing in the cache + if !ok && len(kiCache) >= restoreChunkKeys { + i := 10 + for k := range kiCache { + delete(kiCache, k) + if i--; i == 0 { + break + } + } + } + // cache miss, fetch from tree index if there + if !ok { + ki = &keyIndex{key: rkv.kv.Key} + if idxKey := idx.KeyIndex(ki); idxKey != nil { + kiCache[rkv.kstr], ki = idxKey, idxKey + ok = true + } + } + rev := bytesToRev(rkv.key) + currentRev = rev.main + if ok { + if isTombstone(rkv.key) { + ki.tombstone(rev.main, rev.sub) + continue + } + ki.put(rev.main, rev.sub) + } else if !isTombstone(rkv.key) { + ki.restore(revision{rkv.kv.CreateRevision, 0}, rev, rkv.kv.Version) + idx.Insert(ki) + kiCache[rkv.kstr] = ki + } + } + }() + return rkvc, revc +} + +func restoreChunk(kvc chan<- revKeyValue, keys, vals [][]byte, keyToLease map[string]lease.LeaseID) { + for i, key := range keys { + rkv := revKeyValue{key: key} + if err := rkv.kv.Unmarshal(vals[i]); err != nil { + plog.Fatalf("cannot unmarshal event: %v", err) + } + rkv.kstr = string(rkv.kv.Key) + if isTombstone(key) { + delete(keyToLease, rkv.kstr) + } else if lid := lease.LeaseID(rkv.kv.Lease); lid != lease.NoLease { + keyToLease[rkv.kstr] = lid + } else { + delete(keyToLease, rkv.kstr) + } + kvc <- rkv + } +} + func (s *store) Close() error { close(s.stopc) s.fifoSched.Stop() @@ -490,180 +409,10 @@ func (a *store) Equal(b *store) bool { return a.kvindex.Equal(b.kvindex) } -// range is a keyword in Go, add Keys suffix. 
-func (s *store) rangeKeys(key, end []byte, limit, rangeRev int64, countOnly bool) (kvs []mvccpb.KeyValue, count int, curRev int64, err error) { - curRev = int64(s.currentRev.main) - if s.currentRev.sub > 0 { - curRev += 1 - } - - if rangeRev > curRev { - return nil, -1, s.currentRev.main, ErrFutureRev - } - var rev int64 - if rangeRev <= 0 { - rev = curRev - } else { - rev = rangeRev - } - if rev < s.compactMainRev { - return nil, -1, 0, ErrCompacted - } - - _, revpairs := s.kvindex.Range(key, end, int64(rev)) - if len(revpairs) == 0 { - return nil, 0, curRev, nil - } - if countOnly { - return nil, len(revpairs), curRev, nil - } - - for _, revpair := range revpairs { - start, end := revBytesRange(revpair) - - _, vs := s.tx.UnsafeRange(keyBucketName, start, end, 0) - if len(vs) != 1 { - plog.Fatalf("range cannot find rev (%d,%d)", revpair.main, revpair.sub) - } - - var kv mvccpb.KeyValue - if err := kv.Unmarshal(vs[0]); err != nil { - plog.Fatalf("cannot unmarshal event: %v", err) - } - kvs = append(kvs, kv) - if limit > 0 && len(kvs) >= int(limit) { - break - } - } - return kvs, len(revpairs), curRev, nil -} - -func (s *store) put(key, value []byte, leaseID lease.LeaseID) { - s.txnModify = true - - rev := s.currentRev.main + 1 - c := rev - oldLease := lease.NoLease - - // if the key exists before, use its previous created and - // get its previous leaseID - _, created, ver, err := s.kvindex.Get(key, rev) - if err == nil { - c = created.main - oldLease = s.le.GetLease(lease.LeaseItem{Key: string(key)}) - } - - ibytes := newRevBytes() - revToBytes(revision{main: rev, sub: s.currentRev.sub}, ibytes) - - ver = ver + 1 - kv := mvccpb.KeyValue{ - Key: key, - Value: value, - CreateRevision: c, - ModRevision: rev, - Version: ver, - Lease: int64(leaseID), - } - - d, err := kv.Marshal() - if err != nil { - plog.Fatalf("cannot marshal event: %v", err) - } - - s.tx.UnsafeSeqPut(keyBucketName, ibytes, d) - s.kvindex.Put(key, revision{main: rev, sub: s.currentRev.sub}) - s.changes = append(s.changes, kv) - s.currentRev.sub += 1 - - if oldLease != lease.NoLease { - if s.le == nil { - panic("no lessor to detach lease") - } - - err = s.le.Detach(oldLease, []lease.LeaseItem{{Key: string(key)}}) - if err != nil { - plog.Errorf("unexpected error from lease detach: %v", err) - } - } - - if leaseID != lease.NoLease { - if s.le == nil { - panic("no lessor to attach lease") - } - - err = s.le.Attach(leaseID, []lease.LeaseItem{{Key: string(key)}}) - if err != nil { - panic("unexpected error from lease Attach") - } - } -} - -func (s *store) deleteRange(key, end []byte) int64 { - s.txnModify = true - - rrev := s.currentRev.main - if s.currentRev.sub > 0 { - rrev += 1 - } - keys, revs := s.kvindex.Range(key, end, rrev) - - if len(keys) == 0 { - return 0 - } - - for i, key := range keys { - s.delete(key, revs[i]) - } - return int64(len(keys)) -} - -func (s *store) delete(key []byte, rev revision) { - mainrev := s.currentRev.main + 1 - - ibytes := newRevBytes() - revToBytes(revision{main: mainrev, sub: s.currentRev.sub}, ibytes) - ibytes = appendMarkTombstone(ibytes) - - kv := mvccpb.KeyValue{ - Key: key, - } - - d, err := kv.Marshal() - if err != nil { - plog.Fatalf("cannot marshal event: %v", err) - } - - s.tx.UnsafeSeqPut(keyBucketName, ibytes, d) - err = s.kvindex.Tombstone(key, revision{main: mainrev, sub: s.currentRev.sub}) - if err != nil { - plog.Fatalf("cannot tombstone an existing key (%s): %v", string(key), err) - } - s.changes = append(s.changes, kv) - s.currentRev.sub += 1 - - item := lease.LeaseItem{Key: 
string(key)} - leaseID := s.le.GetLease(item) - - if leaseID != lease.NoLease { - err = s.le.Detach(leaseID, []lease.LeaseItem{item}) - if err != nil { - plog.Errorf("cannot detach %v", err) - } - } -} - -func (s *store) getChanges() []mvccpb.KeyValue { - changes := s.changes - s.changes = make([]mvccpb.KeyValue, 0, 4) - return changes -} - -func (s *store) saveIndex() { +func (s *store) saveIndex(tx backend.BatchTx) { if s.ig == nil { return } - tx := s.tx bs := s.bytesBuf8 binary.BigEndian.PutUint64(bs, s.ig.ConsistentIndex()) // put the index into the underlying backend diff --git a/vendor/github.com/coreos/etcd/mvcc/kvstore_txn.go b/vendor/github.com/coreos/etcd/mvcc/kvstore_txn.go new file mode 100644 index 000000000000..13d4d530d0ae --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/kvstore_txn.go @@ -0,0 +1,253 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mvcc + +import ( + "github.com/coreos/etcd/lease" + "github.com/coreos/etcd/mvcc/backend" + "github.com/coreos/etcd/mvcc/mvccpb" +) + +type storeTxnRead struct { + s *store + tx backend.ReadTx + + firstRev int64 + rev int64 +} + +func (s *store) Read() TxnRead { + s.mu.RLock() + tx := s.b.ReadTx() + s.revMu.RLock() + tx.Lock() + firstRev, rev := s.compactMainRev, s.currentRev + s.revMu.RUnlock() + return newMetricsTxnRead(&storeTxnRead{s, tx, firstRev, rev}) +} + +func (tr *storeTxnRead) FirstRev() int64 { return tr.firstRev } +func (tr *storeTxnRead) Rev() int64 { return tr.rev } + +func (tr *storeTxnRead) Range(key, end []byte, ro RangeOptions) (r *RangeResult, err error) { + return tr.rangeKeys(key, end, tr.Rev(), ro) +} + +func (tr *storeTxnRead) End() { + tr.tx.Unlock() + tr.s.mu.RUnlock() +} + +type storeTxnWrite struct { + *storeTxnRead + tx backend.BatchTx + // beginRev is the revision where the txn begins; it will write to the next revision. + beginRev int64 + changes []mvccpb.KeyValue +} + +func (s *store) Write() TxnWrite { + s.mu.RLock() + tx := s.b.BatchTx() + tx.Lock() + tw := &storeTxnWrite{ + storeTxnRead: &storeTxnRead{s, tx, 0, 0}, + tx: tx, + beginRev: s.currentRev, + changes: make([]mvccpb.KeyValue, 0, 4), + } + return newMetricsTxnWrite(tw) +} + +func (tw *storeTxnWrite) Rev() int64 { return tw.beginRev } + +func (tw *storeTxnWrite) Range(key, end []byte, ro RangeOptions) (r *RangeResult, err error) { + rev := tw.beginRev + if len(tw.changes) > 0 { + rev++ + } + return tw.rangeKeys(key, end, rev, ro) +} + +func (tw *storeTxnWrite) DeleteRange(key, end []byte) (int64, int64) { + if n := tw.deleteRange(key, end); n != 0 || len(tw.changes) > 0 { + return n, int64(tw.beginRev + 1) + } + return 0, int64(tw.beginRev) +} + +func (tw *storeTxnWrite) Put(key, value []byte, lease lease.LeaseID) int64 { + tw.put(key, value, lease) + return int64(tw.beginRev + 1) +} + +func (tw *storeTxnWrite) End() { + // only update index if the txn modifies the mvcc state. 
+ if len(tw.changes) != 0 { + tw.s.saveIndex(tw.tx) + // hold revMu lock to prevent new read txns from opening until writeback. + tw.s.revMu.Lock() + tw.s.currentRev++ + } + tw.tx.Unlock() + if len(tw.changes) != 0 { + tw.s.revMu.Unlock() + } + tw.s.mu.RUnlock() +} + +func (tr *storeTxnRead) rangeKeys(key, end []byte, curRev int64, ro RangeOptions) (*RangeResult, error) { + rev := ro.Rev + if rev > curRev { + return &RangeResult{KVs: nil, Count: -1, Rev: curRev}, ErrFutureRev + } + if rev <= 0 { + rev = curRev + } + if rev < tr.s.compactMainRev { + return &RangeResult{KVs: nil, Count: -1, Rev: 0}, ErrCompacted + } + + _, revpairs := tr.s.kvindex.Range(key, end, int64(rev)) + if len(revpairs) == 0 { + return &RangeResult{KVs: nil, Count: 0, Rev: curRev}, nil + } + if ro.Count { + return &RangeResult{KVs: nil, Count: len(revpairs), Rev: curRev}, nil + } + + var kvs []mvccpb.KeyValue + for _, revpair := range revpairs { + start, end := revBytesRange(revpair) + _, vs := tr.tx.UnsafeRange(keyBucketName, start, end, 0) + if len(vs) != 1 { + plog.Fatalf("range cannot find rev (%d,%d)", revpair.main, revpair.sub) + } + + var kv mvccpb.KeyValue + if err := kv.Unmarshal(vs[0]); err != nil { + plog.Fatalf("cannot unmarshal event: %v", err) + } + kvs = append(kvs, kv) + if ro.Limit > 0 && len(kvs) >= int(ro.Limit) { + break + } + } + return &RangeResult{KVs: kvs, Count: len(revpairs), Rev: curRev}, nil +} + +func (tw *storeTxnWrite) put(key, value []byte, leaseID lease.LeaseID) { + rev := tw.beginRev + 1 + c := rev + oldLease := lease.NoLease + + // if the key exists before, use its previous created and + // get its previous leaseID + _, created, ver, err := tw.s.kvindex.Get(key, rev) + if err == nil { + c = created.main + oldLease = tw.s.le.GetLease(lease.LeaseItem{Key: string(key)}) + } + + ibytes := newRevBytes() + idxRev := revision{main: rev, sub: int64(len(tw.changes))} + revToBytes(idxRev, ibytes) + + ver = ver + 1 + kv := mvccpb.KeyValue{ + Key: key, + Value: value, + CreateRevision: c, + ModRevision: rev, + Version: ver, + Lease: int64(leaseID), + } + + d, err := kv.Marshal() + if err != nil { + plog.Fatalf("cannot marshal event: %v", err) + } + + tw.tx.UnsafeSeqPut(keyBucketName, ibytes, d) + tw.s.kvindex.Put(key, idxRev) + tw.changes = append(tw.changes, kv) + + if oldLease != lease.NoLease { + if tw.s.le == nil { + panic("no lessor to detach lease") + } + err = tw.s.le.Detach(oldLease, []lease.LeaseItem{{Key: string(key)}}) + if err != nil { + plog.Errorf("unexpected error from lease detach: %v", err) + } + } + if leaseID != lease.NoLease { + if tw.s.le == nil { + panic("no lessor to attach lease") + } + err = tw.s.le.Attach(leaseID, []lease.LeaseItem{{Key: string(key)}}) + if err != nil { + panic("unexpected error from lease Attach") + } + } +} + +func (tw *storeTxnWrite) deleteRange(key, end []byte) int64 { + rrev := tw.beginRev + if len(tw.changes) > 0 { + rrev += 1 + } + keys, revs := tw.s.kvindex.Range(key, end, rrev) + if len(keys) == 0 { + return 0 + } + for i, key := range keys { + tw.delete(key, revs[i]) + } + return int64(len(keys)) +} + +func (tw *storeTxnWrite) delete(key []byte, rev revision) { + ibytes := newRevBytes() + idxRev := revision{main: tw.beginRev + 1, sub: int64(len(tw.changes))} + revToBytes(idxRev, ibytes) + ibytes = appendMarkTombstone(ibytes) + + kv := mvccpb.KeyValue{Key: key} + + d, err := kv.Marshal() + if err != nil { + plog.Fatalf("cannot marshal event: %v", err) + } + + tw.tx.UnsafeSeqPut(keyBucketName, ibytes, d) + err = tw.s.kvindex.Tombstone(key, 
idxRev) + if err != nil { + plog.Fatalf("cannot tombstone an existing key (%s): %v", string(key), err) + } + tw.changes = append(tw.changes, kv) + + item := lease.LeaseItem{Key: string(key)} + leaseID := tw.s.le.GetLease(item) + + if leaseID != lease.NoLease { + err = tw.s.le.Detach(leaseID, []lease.LeaseItem{item}) + if err != nil { + plog.Errorf("cannot detach %v", err) + } + } +} + +func (tw *storeTxnWrite) Changes() []mvccpb.KeyValue { return tw.changes } diff --git a/vendor/github.com/coreos/etcd/mvcc/metrics.go b/vendor/github.com/coreos/etcd/mvcc/metrics.go index aa8af6aa5525..a65fe59b996d 100644 --- a/vendor/github.com/coreos/etcd/mvcc/metrics.go +++ b/vendor/github.com/coreos/etcd/mvcc/metrics.go @@ -15,6 +15,8 @@ package mvcc import ( + "sync" + "github.com/prometheus/client_golang/prometheus" ) @@ -129,12 +131,21 @@ var ( Buckets: prometheus.ExponentialBuckets(100, 2, 14), }) - dbTotalSize = prometheus.NewGauge(prometheus.GaugeOpts{ + dbTotalSize = prometheus.NewGaugeFunc(prometheus.GaugeOpts{ Namespace: "etcd_debugging", Subsystem: "mvcc", Name: "db_total_size_in_bytes", Help: "Total size of the underlying database in bytes.", - }) + }, + func() float64 { + reportDbTotalSizeInBytesMu.RLock() + defer reportDbTotalSizeInBytesMu.RUnlock() + return reportDbTotalSizeInBytes() + }, + ) + // overridden by mvcc initialization + reportDbTotalSizeInBytesMu sync.RWMutex + reportDbTotalSizeInBytes func() float64 = func() float64 { return 0 } ) func init() { diff --git a/vendor/github.com/coreos/etcd/mvcc/metrics_txn.go b/vendor/github.com/coreos/etcd/mvcc/metrics_txn.go new file mode 100644 index 000000000000..fd2144279aea --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/metrics_txn.go @@ -0,0 +1,67 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
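+// metricsTxnWrite wraps a TxnRead or TxnWrite and counts its ranges, puts, and deletes so the per-operation Prometheus counters can be updated when the txn ends.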
+ +package mvcc + +import ( + "github.com/coreos/etcd/lease" +) + +type metricsTxnWrite struct { + TxnWrite + ranges uint + puts uint + deletes uint +} + +func newMetricsTxnRead(tr TxnRead) TxnRead { + return &metricsTxnWrite{&txnReadWrite{tr}, 0, 0, 0} +} + +func newMetricsTxnWrite(tw TxnWrite) TxnWrite { + return &metricsTxnWrite{tw, 0, 0, 0} +} + +func (tw *metricsTxnWrite) Range(key, end []byte, ro RangeOptions) (*RangeResult, error) { + tw.ranges++ + return tw.TxnWrite.Range(key, end, ro) +} + +func (tw *metricsTxnWrite) DeleteRange(key, end []byte) (n, rev int64) { + tw.deletes++ + return tw.TxnWrite.DeleteRange(key, end) +} + +func (tw *metricsTxnWrite) Put(key, value []byte, lease lease.LeaseID) (rev int64) { + tw.puts++ + return tw.TxnWrite.Put(key, value, lease) +} + +func (tw *metricsTxnWrite) End() { + defer tw.TxnWrite.End() + if sum := tw.ranges + tw.puts + tw.deletes; sum != 1 { + if sum > 1 { + txnCounter.Inc() + } + return + } + switch { + case tw.ranges == 1: + rangeCounter.Inc() + case tw.puts == 1: + putCounter.Inc() + case tw.deletes == 1: + deleteCounter.Inc() + } +} diff --git a/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go b/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go index aa053f4e66e0..7033f1326626 100644 --- a/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go +++ b/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go @@ -713,7 +713,7 @@ func init() { proto.RegisterFile("kv.proto", fileDescriptorKv) } var fileDescriptorKv = []byte{ // 303 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x6c, 0x90, 0x41, 0x4e, 0xc2, 0x40, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x41, 0x4e, 0xc2, 0x40, 0x14, 0x86, 0x3b, 0x14, 0x0a, 0x3e, 0x08, 0x36, 0x13, 0x12, 0x27, 0x2e, 0x26, 0x95, 0x8d, 0x18, 0x13, 0x4c, 0xf0, 0x06, 0xc6, 0xae, 0x70, 0x61, 0x1a, 0x74, 0x4b, 0x4a, 0x79, 0x21, 0xa4, 0x94, 0x69, 0x4a, 0x9d, 0xa4, 0x37, 0x71, 0xef, 0xde, 0x73, 0xb0, 0xe4, 0x08, 0x52, 0x2f, 0x62, 0xfa, diff --git a/vendor/github.com/coreos/etcd/mvcc/watchable_store.go b/vendor/github.com/coreos/etcd/mvcc/watchable_store.go index dbb79bcb6930..68d9ab71d271 100644 --- a/vendor/github.com/coreos/etcd/mvcc/watchable_store.go +++ b/vendor/github.com/coreos/etcd/mvcc/watchable_store.go @@ -41,10 +41,12 @@ type watchable interface { } type watchableStore struct { - mu sync.Mutex - *store + // mu protects watcher groups and batches. It should never be locked + // before locking store.mu to avoid deadlock. 
+ mu sync.RWMutex + // victims are watcher batches that were blocked on the watch channel victims []watcherBatch victimc chan struct{} @@ -76,9 +78,11 @@ func newWatchableStore(b backend.Backend, le lease.Lessor, ig ConsistentIndexGet synced: newWatcherGroup(), stopc: make(chan struct{}), } + s.store.ReadView = &readView{s} + s.store.WriteView = &writeView{s} if s.le != nil { // use this store as the deleter so revokes trigger watch events - s.le.SetRangeDeleter(s) + s.le.SetRangeDeleter(func() lease.TxnDelete { return s.Write() }) } s.wg.Add(2) go s.syncWatchersLoop() @@ -86,89 +90,6 @@ func newWatchableStore(b backend.Backend, le lease.Lessor, ig ConsistentIndexGet return s } -func (s *watchableStore) Put(key, value []byte, lease lease.LeaseID) (rev int64) { - s.mu.Lock() - defer s.mu.Unlock() - - rev = s.store.Put(key, value, lease) - changes := s.store.getChanges() - if len(changes) != 1 { - plog.Panicf("unexpected len(changes) != 1 after put") - } - - ev := mvccpb.Event{ - Type: mvccpb.PUT, - Kv: &changes[0], - } - s.notify(rev, []mvccpb.Event{ev}) - return rev -} - -func (s *watchableStore) DeleteRange(key, end []byte) (n, rev int64) { - s.mu.Lock() - defer s.mu.Unlock() - - n, rev = s.store.DeleteRange(key, end) - changes := s.store.getChanges() - - if len(changes) != int(n) { - plog.Panicf("unexpected len(changes) != n after deleteRange") - } - - if n == 0 { - return n, rev - } - - evs := make([]mvccpb.Event, n) - for i := range changes { - evs[i] = mvccpb.Event{ - Type: mvccpb.DELETE, - Kv: &changes[i]} - evs[i].Kv.ModRevision = rev - } - s.notify(rev, evs) - return n, rev -} - -func (s *watchableStore) TxnBegin() int64 { - s.mu.Lock() - return s.store.TxnBegin() -} - -func (s *watchableStore) TxnEnd(txnID int64) error { - err := s.store.TxnEnd(txnID) - if err != nil { - return err - } - - changes := s.getChanges() - if len(changes) == 0 { - s.mu.Unlock() - return nil - } - - rev := s.store.Rev() - evs := make([]mvccpb.Event, len(changes)) - for i, change := range changes { - switch change.CreateRevision { - case 0: - evs[i] = mvccpb.Event{ - Type: mvccpb.DELETE, - Kv: &changes[i]} - evs[i].Kv.ModRevision = rev - default: - evs[i] = mvccpb.Event{ - Type: mvccpb.PUT, - Kv: &changes[i]} - } - } - - s.notify(rev, evs) - s.mu.Unlock() - - return nil -} - func (s *watchableStore) Close() error { close(s.stopc) s.wg.Wait() @@ -186,9 +107,6 @@ func (s *watchableStore) NewWatchStream() WatchStream { } func (s *watchableStore) watch(key, end []byte, startRev int64, id WatchID, ch chan<- WatchResponse, fcs ...FilterFunc) (*watcher, cancelFunc) { - s.mu.Lock() - defer s.mu.Unlock() - wa := &watcher{ key: key, end: end, @@ -198,21 +116,24 @@ func (s *watchableStore) watch(key, end []byte, startRev int64, id WatchID, ch c fcs: fcs, } - s.store.mu.Lock() - synced := startRev > s.store.currentRev.main || startRev == 0 + s.mu.Lock() + s.revMu.RLock() + synced := startRev > s.store.currentRev || startRev == 0 if synced { - wa.minRev = s.store.currentRev.main + 1 + wa.minRev = s.store.currentRev + 1 if startRev > wa.minRev { wa.minRev = startRev } } - s.store.mu.Unlock() if synced { s.synced.add(wa) } else { slowWatcherGauge.Inc() s.unsynced.add(wa) } + s.revMu.RUnlock() + s.mu.Unlock() + watcherGauge.Inc() return wa, func() { s.cancelWatcher(wa) } @@ -258,17 +179,35 @@ func (s *watchableStore) cancelWatcher(wa *watcher) { s.mu.Unlock() } +func (s *watchableStore) Restore(b backend.Backend) error { + s.mu.Lock() + defer s.mu.Unlock() + err := s.store.Restore(b) + if err != nil { + return err + } + 
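+ // move all synced watchers to the unsynced group so they resync events from the restored backend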
+ for wa := range s.synced.watchers { + s.unsynced.watchers.add(wa) + } + s.synced = newWatcherGroup() + return nil +} + // syncWatchersLoop syncs the watcher in the unsynced map every 100ms. func (s *watchableStore) syncWatchersLoop() { defer s.wg.Done() for { - s.mu.Lock() + s.mu.RLock() st := time.Now() lastUnsyncedWatchers := s.unsynced.size() - s.syncWatchers() - unsyncedWatchers := s.unsynced.size() - s.mu.Unlock() + s.mu.RUnlock() + + unsyncedWatchers := 0 + if lastUnsyncedWatchers > 0 { + unsyncedWatchers = s.syncWatchers() + } syncDuration := time.Since(st) waitDuration := 100 * time.Millisecond @@ -295,9 +234,9 @@ func (s *watchableStore) syncVictimsLoop() { for s.moveVictims() != 0 { // try to update all victim watchers } - s.mu.Lock() + s.mu.RLock() isEmpty := len(s.victims) == 0 - s.mu.Unlock() + s.mu.RUnlock() var tickc <-chan time.Time if !isEmpty { @@ -340,8 +279,8 @@ func (s *watchableStore) moveVictims() (moved int) { // assign completed victim watchers to unsync/sync s.mu.Lock() - s.store.mu.Lock() - curRev := s.store.currentRev.main + s.store.revMu.RLock() + curRev := s.store.currentRev for w, eb := range wb { if newVictim != nil && newVictim[w] != nil { // couldn't send watch response; stays victim @@ -358,7 +297,7 @@ func (s *watchableStore) moveVictims() (moved int) { s.synced.add(w) } } - s.store.mu.Unlock() + s.store.revMu.RUnlock() s.mu.Unlock() } @@ -376,19 +315,23 @@ func (s *watchableStore) moveVictims() (moved int) { // 2. iterate over the set to get the minimum revision and remove compacted watchers // 3. use minimum revision to get all key-value pairs and send those events to watchers // 4. remove synced watchers in set from unsynced group and move to synced group -func (s *watchableStore) syncWatchers() { +func (s *watchableStore) syncWatchers() int { + s.mu.Lock() + defer s.mu.Unlock() + if s.unsynced.size() == 0 { - return + return 0 } - s.store.mu.Lock() - defer s.store.mu.Unlock() + s.store.revMu.RLock() + defer s.store.revMu.RUnlock() // in order to find key-value pairs from unsynced watchers, we need to // find min revision index, and these revisions can be used to // query the backend store of key-value pairs - curRev := s.store.currentRev.main + curRev := s.store.currentRev compactionRev := s.store.compactMainRev + wg, minRev := s.unsynced.choose(maxWatchersPerSync, curRev, compactionRev) minBytes, maxBytes := newRevBytes(), newRevBytes() revToBytes(revision{main: minRev}, minBytes) @@ -396,7 +339,7 @@ func (s *watchableStore) syncWatchers() { // UnsafeRange returns keys and values. And in boltdb, keys are revisions. // values are actual key-value pairs in backend. 
- tx := s.store.b.BatchTx() + tx := s.store.b.ReadTx() tx.Lock() revs, vs := tx.UnsafeRange(keyBucketName, minBytes, maxBytes, 0) evs := kvsToEvents(wg, revs, vs) @@ -446,6 +389,8 @@ func (s *watchableStore) syncWatchers() { vsz += len(v) } slowWatcherGauge.Set(float64(s.unsynced.size() + vsz)) + + return s.unsynced.size() } // kvsToEvents gets all events for the watchers from all key-value pairs @@ -511,8 +456,8 @@ func (s *watchableStore) addVictim(victim watcherBatch) { func (s *watchableStore) rev() int64 { return s.store.Rev() } func (s *watchableStore) progress(w *watcher) { - s.mu.Lock() - defer s.mu.Unlock() + s.mu.RLock() + defer s.mu.RUnlock() if _, ok := s.synced.watchers[w]; ok { w.send(WatchResponse{WatchID: w.id, Revision: s.rev()}) diff --git a/vendor/github.com/coreos/etcd/mvcc/watchable_store_txn.go b/vendor/github.com/coreos/etcd/mvcc/watchable_store_txn.go new file mode 100644 index 000000000000..5c5bfda13413 --- /dev/null +++ b/vendor/github.com/coreos/etcd/mvcc/watchable_store_txn.go @@ -0,0 +1,53 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mvcc + +import ( + "github.com/coreos/etcd/mvcc/mvccpb" +) + +func (tw *watchableStoreTxnWrite) End() { + changes := tw.Changes() + if len(changes) == 0 { + tw.TxnWrite.End() + return + } + + rev := tw.Rev() + 1 + evs := make([]mvccpb.Event, len(changes)) + for i, change := range changes { + evs[i].Kv = &changes[i] + if change.CreateRevision == 0 { + evs[i].Type = mvccpb.DELETE + evs[i].Kv.ModRevision = rev + } else { + evs[i].Type = mvccpb.PUT + } + } + + // end write txn under watchable store lock so the updates are visible + // when asynchronous event posting checks the current store revision + tw.s.mu.Lock() + tw.s.notify(rev, evs) + tw.TxnWrite.End() + tw.s.mu.Unlock() +} + +type watchableStoreTxnWrite struct { + TxnWrite + s *watchableStore +} + +func (s *watchableStore) Write() TxnWrite { return &watchableStoreTxnWrite{s.store.Write(), s} } diff --git a/vendor/github.com/coreos/etcd/mvcc/watcher_group.go b/vendor/github.com/coreos/etcd/mvcc/watcher_group.go index 2710c1cc9408..6ef1d0ce8bbe 100644 --- a/vendor/github.com/coreos/etcd/mvcc/watcher_group.go +++ b/vendor/github.com/coreos/etcd/mvcc/watcher_group.go @@ -183,7 +183,7 @@ func (wg *watcherGroup) add(wa *watcher) { // contains is whether the given key has a watcher in the group. func (wg *watcherGroup) contains(key string) bool { _, ok := wg.keyWatchers[key] - return ok || wg.ranges.Contains(adt.NewStringAffinePoint(key)) + return ok || wg.ranges.Intersects(adt.NewStringAffinePoint(key)) } // size gives the number of unique watchers in the group. 
diff --git a/vendor/github.com/coreos/etcd/pkg/adt/interval_tree.go b/vendor/github.com/coreos/etcd/pkg/adt/interval_tree.go index 6edbe593fb41..9769771ea4fd 100644 --- a/vendor/github.com/coreos/etcd/pkg/adt/interval_tree.go +++ b/vendor/github.com/coreos/etcd/pkg/adt/interval_tree.go @@ -15,6 +15,7 @@ package adt import ( + "bytes" "math" ) @@ -134,25 +135,29 @@ func (x *intervalNode) updateMax() { type nodeVisitor func(n *intervalNode) bool // visit will call a node visitor on each node that overlaps the given interval -func (x *intervalNode) visit(iv *Interval, nv nodeVisitor) { +func (x *intervalNode) visit(iv *Interval, nv nodeVisitor) bool { if x == nil { - return + return true } v := iv.Compare(&x.iv.Ivl) switch { case v < 0: - x.left.visit(iv, nv) + if !x.left.visit(iv, nv) { + return false + } case v > 0: maxiv := Interval{x.iv.Ivl.Begin, x.max} if maxiv.Compare(iv) == 0 { - x.left.visit(iv, nv) - x.right.visit(iv, nv) + if !x.left.visit(iv, nv) || !x.right.visit(iv, nv) { + return false + } } default: - nv(x) - x.left.visit(iv, nv) - x.right.visit(iv, nv) + if !x.left.visit(iv, nv) || !nv(x) || !x.right.visit(iv, nv) { + return false + } } + return true } type IntervalValue struct { @@ -402,10 +407,11 @@ func (ivt *IntervalTree) MaxHeight() int { return int((2 * math.Log2(float64(ivt.Len()+1))) + 0.5) } -// IntervalVisitor is used on tree searchs; return false to stop searching. +// IntervalVisitor is used on tree searches; return false to stop searching. type IntervalVisitor func(n *IntervalValue) bool // Visit calls a visitor function on every tree node intersecting the given interval. +// It will visit each interval [x, y) in ascending order sorted on x. func (ivt *IntervalTree) Visit(ivl Interval, ivv IntervalVisitor) { ivt.root.visit(&ivl, func(n *intervalNode) bool { return ivv(&n.iv) }) } @@ -432,8 +438,8 @@ func (ivt *IntervalTree) Find(ivl Interval) (ret *IntervalValue) { return &n.iv } -// Contains returns true if there is some tree node intersecting the given interval. -func (ivt *IntervalTree) Contains(iv Interval) bool { +// Intersects returns true if there is some tree node intersecting the given interval. +func (ivt *IntervalTree) Intersects(iv Interval) bool { x := ivt.root for x != nil && iv.Compare(&x.iv.Ivl) != 0 { if x.left != nil && x.left.max.Compare(iv.Begin) > 0 { @@ -445,6 +451,30 @@ func (ivt *IntervalTree) Contains(iv Interval) bool { return x != nil } +// Contains returns true if the interval tree's keys cover the entire given interval. +func (ivt *IntervalTree) Contains(ivl Interval) bool { + var maxEnd, minBegin Comparable + + isContiguous := true + ivt.Visit(ivl, func(n *IntervalValue) bool { + if minBegin == nil { + minBegin = n.Ivl.Begin + maxEnd = n.Ivl.End + return true + } + if maxEnd.Compare(n.Ivl.Begin) < 0 { + isContiguous = false + return false + } + if n.Ivl.End.Compare(maxEnd) > 0 { + maxEnd = n.Ivl.End + } + return true + }) + + return isContiguous && minBegin != nil && maxEnd.Compare(ivl.End) >= 0 && minBegin.Compare(ivl.Begin) <= 0 +} + // Stab returns a slice with all elements in the tree intersecting the interval. 
func (ivt *IntervalTree) Stab(iv Interval) (ivs []*IntervalValue) { if ivt.count == 0 { @@ -529,3 +559,32 @@ func (v Int64Comparable) Compare(c Comparable) int { } return 0 } + +// BytesAffineComparable treats empty byte arrays as > all other byte arrays +type BytesAffineComparable []byte + +func (b BytesAffineComparable) Compare(c Comparable) int { + bc := c.(BytesAffineComparable) + + if len(b) == 0 { + if len(bc) == 0 { + return 0 + } + return 1 + } + if len(bc) == 0 { + return -1 + } + + return bytes.Compare(b, bc) +} + +func NewBytesAffineInterval(begin, end []byte) Interval { + return Interval{BytesAffineComparable(begin), BytesAffineComparable(end)} +} +func NewBytesAffinePoint(b []byte) Interval { + be := make([]byte, len(b)+1) + copy(be, b) + be[len(b)] = 0 + return NewBytesAffineInterval(b, be) +} diff --git a/vendor/github.com/coreos/etcd/pkg/debugutil/doc.go b/vendor/github.com/coreos/etcd/pkg/debugutil/doc.go new file mode 100644 index 000000000000..74499eb2737e --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/debugutil/doc.go @@ -0,0 +1,16 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package debugutil includes utility functions for debugging. +package debugutil diff --git a/vendor/github.com/coreos/etcd/pkg/debugutil/pprof.go b/vendor/github.com/coreos/etcd/pkg/debugutil/pprof.go new file mode 100644 index 000000000000..8d5544a3dcac --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/debugutil/pprof.go @@ -0,0 +1,47 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package debugutil + +import ( + "net/http" + "net/http/pprof" + "runtime" +) + +const HTTPPrefixPProf = "/debug/pprof" + +// PProfHandlers returns a map of pprof handlers keyed by the HTTP path. 
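+// All handlers are registered under the HTTPPrefixPProf path prefix.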
+func PProfHandlers() map[string]http.Handler { + // set only when there's no existing setting + if runtime.SetMutexProfileFraction(-1) == 0 { + // 1 out of 5 mutex events are reported, on average + runtime.SetMutexProfileFraction(5) + } + + m := make(map[string]http.Handler) + + m[HTTPPrefixPProf+"/"] = http.HandlerFunc(pprof.Index) + m[HTTPPrefixPProf+"/profile"] = http.HandlerFunc(pprof.Profile) + m[HTTPPrefixPProf+"/symbol"] = http.HandlerFunc(pprof.Symbol) + m[HTTPPrefixPProf+"/cmdline"] = http.HandlerFunc(pprof.Cmdline) + m[HTTPPrefixPProf+"/trace "] = http.HandlerFunc(pprof.Trace) + m[HTTPPrefixPProf+"/heap"] = pprof.Handler("heap") + m[HTTPPrefixPProf+"/goroutine"] = pprof.Handler("goroutine") + m[HTTPPrefixPProf+"/threadcreate"] = pprof.Handler("threadcreate") + m[HTTPPrefixPProf+"/block"] = pprof.Handler("block") + m[HTTPPrefixPProf+"/mutex"] = pprof.Handler("mutex") + + return m +} diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/fileutil.go b/vendor/github.com/coreos/etcd/pkg/fileutil/fileutil.go index aad40b75904c..fce5126c6956 100644 --- a/vendor/github.com/coreos/etcd/pkg/fileutil/fileutil.go +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/fileutil.go @@ -17,6 +17,7 @@ package fileutil import ( "fmt" + "io" "io/ioutil" "os" "path/filepath" @@ -101,11 +102,11 @@ func Exist(name string) bool { // shorten the length of the file. func ZeroToEnd(f *os.File) error { // TODO: support FALLOC_FL_ZERO_RANGE - off, err := f.Seek(0, os.SEEK_CUR) + off, err := f.Seek(0, io.SeekCurrent) if err != nil { return err } - lenf, lerr := f.Seek(0, os.SEEK_END) + lenf, lerr := f.Seek(0, io.SeekEnd) if lerr != nil { return lerr } @@ -116,6 +117,6 @@ func ZeroToEnd(f *os.File) error { if err = Preallocate(f, lenf, true); err != nil { return err } - _, err = f.Seek(off, os.SEEK_SET) + _, err = f.Seek(off, io.SeekStart) return err } diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_linux.go b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_linux.go index dec25a1af44d..939fea623818 100644 --- a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_linux.go +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_linux.go @@ -17,6 +17,7 @@ package fileutil import ( + "io" "os" "syscall" ) @@ -36,7 +37,7 @@ const ( var ( wrlck = syscall.Flock_t{ Type: syscall.F_WRLCK, - Whence: int16(os.SEEK_SET), + Whence: int16(io.SeekStart), Start: 0, Len: 0, } diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate.go b/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate.go index bb7f02812393..c747b7cf81f9 100644 --- a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate.go +++ b/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate.go @@ -14,7 +14,10 @@ package fileutil -import "os" +import ( + "io" + "os" +) // Preallocate tries to allocate the space for given // file. This operation is only supported on linux by a @@ -22,6 +25,10 @@ import "os" // If the operation is unsupported, no error will be returned. // Otherwise, the error encountered will be returned. 
func Preallocate(f *os.File, sizeInBytes int64, extendFile bool) error { + if sizeInBytes == 0 { + // fallocate will return EINVAL if length is 0; skip + return nil + } if extendFile { return preallocExtend(f, sizeInBytes) } @@ -29,15 +36,15 @@ func Preallocate(f *os.File, sizeInBytes int64, extendFile bool) error { } func preallocExtendTrunc(f *os.File, sizeInBytes int64) error { - curOff, err := f.Seek(0, os.SEEK_CUR) + curOff, err := f.Seek(0, io.SeekCurrent) if err != nil { return err } - size, err := f.Seek(sizeInBytes, os.SEEK_END) + size, err := f.Seek(sizeInBytes, io.SeekEnd) if err != nil { return err } - if _, err = f.Seek(curOff, os.SEEK_SET); err != nil { + if _, err = f.Seek(curOff, io.SeekStart); err != nil { return err } if sizeInBytes > size { diff --git a/vendor/github.com/coreos/etcd/pkg/httputil/httputil.go b/vendor/github.com/coreos/etcd/pkg/httputil/httputil.go index 859fc9d49e1c..09f44e7c71d9 100644 --- a/vendor/github.com/coreos/etcd/pkg/httputil/httputil.go +++ b/vendor/github.com/coreos/etcd/pkg/httputil/httputil.go @@ -13,15 +13,6 @@ import ( "net/http" ) -func RequestCanceler(req *http.Request) func() { - ch := make(chan struct{}) - req.Cancel = ch - - return func() { - close(ch) - } -} - // GracefulClose drains http.Response.Body until it hits EOF // and closes it. This prevents TCP/TLS connections from closing, // therefore available for reuse. diff --git a/vendor/github.com/coreos/etcd/pkg/idutil/id.go b/vendor/github.com/coreos/etcd/pkg/idutil/id.go index 931beb2d0583..2da210626571 100644 --- a/vendor/github.com/coreos/etcd/pkg/idutil/id.go +++ b/vendor/github.com/coreos/etcd/pkg/idutil/id.go @@ -32,8 +32,8 @@ const ( // a node member ID. // // The initial id is in this format: -// High order byte is memberID, next 5 bytes are from timestamp, -// and low order 2 bytes are 0s. +// High order 2 bytes are from memberID, next 5 bytes are from timestamp, +// and low order one byte is a counter. // | prefix | suffix | // | 2 bytes | 5 bytes | 1 byte | // | memberID | timestamp | cnt | diff --git a/vendor/github.com/coreos/etcd/pkg/netutil/netutil.go b/vendor/github.com/coreos/etcd/pkg/netutil/netutil.go index bb5f392b34c5..5e38dc98dbfe 100644 --- a/vendor/github.com/coreos/etcd/pkg/netutil/netutil.go +++ b/vendor/github.com/coreos/etcd/pkg/netutil/netutil.go @@ -16,14 +16,13 @@ package netutil import ( + "context" "net" "net/url" "reflect" "sort" "time" - "golang.org/x/net/context" - "github.com/coreos/etcd/pkg/types" "github.com/coreos/pkg/capnslog" ) @@ -32,11 +31,38 @@ var ( plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "pkg/netutil") // indirection for testing - resolveTCPAddr = net.ResolveTCPAddr + resolveTCPAddr = resolveTCPAddrDefault ) const retryInterval = time.Second +// taken from go's ResolveTCP code but uses configurable ctx +func resolveTCPAddrDefault(ctx context.Context, addr string) (*net.TCPAddr, error) { + host, port, serr := net.SplitHostPort(addr) + if serr != nil { + return nil, serr + } + portnum, perr := net.DefaultResolver.LookupPort(ctx, "tcp", port) + if perr != nil { + return nil, perr + } + + var ips []net.IPAddr + if ip := net.ParseIP(host); ip != nil { + ips = []net.IPAddr{{IP: ip}} + } else { + // Try as a DNS name. + ipss, err := net.DefaultResolver.LookupIPAddr(ctx, host) + if err != nil { + return nil, err + } + ips = ipss + } + // randomize? + ip := ips[0] + return &net.TCPAddr{IP: ip.IP, Port: portnum, Zone: ip.Zone}, nil +} + // resolveTCPAddrs is a convenience wrapper for net.ResolveTCPAddr. 
// resolveTCPAddrs return a new set of url.URLs, in which all DNS hostnames // are resolved. @@ -75,7 +101,7 @@ func resolveURL(ctx context.Context, u url.URL) (string, error) { if host == "localhost" || net.ParseIP(host) != nil { return "", nil } - tcpAddr, err := resolveTCPAddr("tcp", u.Host) + tcpAddr, err := resolveTCPAddr(ctx, u.Host) if err == nil { plog.Infof("resolving %s to %s", u.Host, tcpAddr.String()) return tcpAddr.String(), nil diff --git a/vendor/github.com/coreos/etcd/pkg/schedule/schedule.go b/vendor/github.com/coreos/etcd/pkg/schedule/schedule.go index 79c59b01288a..bf8528b753a0 100644 --- a/vendor/github.com/coreos/etcd/pkg/schedule/schedule.go +++ b/vendor/github.com/coreos/etcd/pkg/schedule/schedule.go @@ -88,8 +88,6 @@ func (f *fifo) Schedule(j Job) { } } f.pendings = append(f.pendings, j) - - return } func (f *fifo) Pending() int { diff --git a/vendor/github.com/coreos/etcd/pkg/srv/srv.go b/vendor/github.com/coreos/etcd/pkg/srv/srv.go new file mode 100644 index 000000000000..fefcbcb4b889 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/srv/srv.go @@ -0,0 +1,140 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package srv looks up DNS SRV records. +package srv + +import ( + "fmt" + "net" + "net/url" + "strings" + + "github.com/coreos/etcd/pkg/types" +) + +var ( + // indirection for testing + lookupSRV = net.LookupSRV // net.DefaultResolver.LookupSRV when ctxs don't conflict + resolveTCPAddr = net.ResolveTCPAddr +) + +// GetCluster gets the cluster information via DNS discovery. +// Also sees each entry as a separate instance. +func GetCluster(service, name, dns string, apurls types.URLs) ([]string, error) { + tempName := int(0) + tcp2ap := make(map[string]url.URL) + + // First, resolve the apurls + for _, url := range apurls { + tcpAddr, err := resolveTCPAddr("tcp", url.Host) + if err != nil { + return nil, err + } + tcp2ap[tcpAddr.String()] = url + } + + stringParts := []string{} + updateNodeMap := func(service, scheme string) error { + _, addrs, err := lookupSRV(service, "tcp", dns) + if err != nil { + return err + } + for _, srv := range addrs { + port := fmt.Sprintf("%d", srv.Port) + host := net.JoinHostPort(srv.Target, port) + tcpAddr, terr := resolveTCPAddr("tcp", host) + if terr != nil { + err = terr + continue + } + n := "" + url, ok := tcp2ap[tcpAddr.String()] + if ok { + n = name + } + if n == "" { + n = fmt.Sprintf("%d", tempName) + tempName++ + } + // SRV records have a trailing dot but URL shouldn't. 
+ shortHost := strings.TrimSuffix(srv.Target, ".") + urlHost := net.JoinHostPort(shortHost, port) + stringParts = append(stringParts, fmt.Sprintf("%s=%s://%s", n, scheme, urlHost)) + if ok && url.Scheme != scheme { + err = fmt.Errorf("bootstrap at %s from DNS for %s has scheme mismatch with expected peer %s", scheme+"://"+urlHost, service, url.String()) + } + } + if len(stringParts) == 0 { + return err + } + return nil + } + + failCount := 0 + err := updateNodeMap(service+"-ssl", "https") + srvErr := make([]string, 2) + if err != nil { + srvErr[0] = fmt.Sprintf("error querying DNS SRV records for _%s-ssl %s", service, err) + failCount++ + } + err = updateNodeMap(service, "http") + if err != nil { + srvErr[1] = fmt.Sprintf("error querying DNS SRV records for _%s %s", service, err) + failCount++ + } + if failCount == 2 { + return nil, fmt.Errorf("srv: too many errors querying DNS SRV records (%q, %q)", srvErr[0], srvErr[1]) + } + return stringParts, nil +} + +type SRVClients struct { + Endpoints []string + SRVs []*net.SRV +} + +// GetClient looks up the client endpoints for a service and domain. +func GetClient(service, domain string) (*SRVClients, error) { + var urls []*url.URL + var srvs []*net.SRV + + updateURLs := func(service, scheme string) error { + _, addrs, err := lookupSRV(service, "tcp", domain) + if err != nil { + return err + } + for _, srv := range addrs { + urls = append(urls, &url.URL{ + Scheme: scheme, + Host: net.JoinHostPort(srv.Target, fmt.Sprintf("%d", srv.Port)), + }) + } + srvs = append(srvs, addrs...) + return nil + } + + errHTTPS := updateURLs(service+"-ssl", "https") + errHTTP := updateURLs(service, "http") + + if errHTTPS != nil && errHTTP != nil { + return nil, fmt.Errorf("dns lookup errors: %s and %s", errHTTPS, errHTTP) + } + + endpoints := make([]string, len(urls)) + for i := range urls { + endpoints[i] = urls[i].String() + } + return &SRVClients{Endpoints: endpoints, SRVs: srvs}, nil +} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/listener.go b/vendor/github.com/coreos/etcd/pkg/transport/listener.go index 4fcdb5ad9a37..3b58b41543f2 100644 --- a/vendor/github.com/coreos/etcd/pkg/transport/listener.go +++ b/vendor/github.com/coreos/etcd/pkg/transport/listener.go @@ -23,22 +23,21 @@ import ( "crypto/x509/pkix" "encoding/pem" "fmt" - "log" "math/big" "net" "os" "path/filepath" + "strings" "time" - "github.com/coreos/etcd/pkg/fileutil" "github.com/coreos/etcd/pkg/tlsutil" ) -func NewListener(addr, scheme string, tlscfg *tls.Config) (l net.Listener, err error) { +func NewListener(addr, scheme string, tlsinfo *TLSInfo) (l net.Listener, err error) { if l, err = newListener(addr, scheme); err != nil { return nil, err } - return wrapTLS(addr, scheme, tlscfg, l) + return wrapTLS(addr, scheme, tlsinfo, l) } func newListener(addr string, scheme string) (net.Listener, error) { @@ -49,15 +48,11 @@ func newListener(addr string, scheme string) (net.Listener, error) { return net.Listen("tcp", addr) } -func wrapTLS(addr, scheme string, tlscfg *tls.Config, l net.Listener) (net.Listener, error) { +func wrapTLS(addr, scheme string, tlsinfo *TLSInfo, l net.Listener) (net.Listener, error) { if scheme != "https" && scheme != "unixs" { return l, nil } - if tlscfg == nil { - l.Close() - return nil, fmt.Errorf("cannot listen on TLS for %s: KeyFile and CertFile are not presented", scheme+"://"+addr) - } - return tls.NewListener(l, tlscfg), nil + return newTLSListener(l, tlsinfo) } type TLSInfo struct { @@ -70,6 +65,10 @@ type TLSInfo struct { // ServerName ensures the cert 
matches the given host in case of discovery / virtual hosting ServerName string + // HandshakeFailure is optionally called when a connection fails to handshake. The + // connection will be closed immediately afterwards. + HandshakeFailure func(*tls.Conn, error) + selfCert bool // parseFunc exists to simplify testing. Typically, parseFunc @@ -86,7 +85,7 @@ func (info TLSInfo) Empty() bool { } func SelfCert(dirpath string, hosts []string) (info TLSInfo, err error) { - if err = fileutil.TouchDirAll(dirpath); err != nil { + if err = os.MkdirAll(dirpath, 0700); err != nil { return } @@ -173,6 +172,14 @@ func (info TLSInfo) baseConfig() (*tls.Config, error) { MinVersion: tls.VersionTLS12, ServerName: info.ServerName, } + // this only reloads certs when there's a client request + // TODO: support server-side refresh (e.g. inotify, SIGHUP), caching + cfg.GetCertificate = func(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) { + return tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc) + } + cfg.GetClientCertificate = func(unused *tls.CertificateRequestInfo) (*tls.Certificate, error) { + return tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc) + } return cfg, nil } @@ -235,9 +242,6 @@ func (info TLSInfo) ClientConfig() (*tls.Config, error) { if err != nil { return nil, err } - // if given a CA, trust any host with a cert signed by the CA - log.Println("warning: ignoring ServerName for user-provided CA for backwards compatibility is deprecated") - cfg.ServerName = "" } if info.selfCert { @@ -246,31 +250,11 @@ func (info TLSInfo) ClientConfig() (*tls.Config, error) { return cfg, nil } -// ShallowCopyTLSConfig copies *tls.Config. This is only -// work-around for go-vet tests, which complains -// -// assignment copies lock value to p: crypto/tls.Config contains sync.Once contains sync.Mutex -// -// Keep up-to-date with 'go/src/crypto/tls/common.go' -func ShallowCopyTLSConfig(cfg *tls.Config) *tls.Config { - ncfg := tls.Config{ - Time: cfg.Time, - Certificates: cfg.Certificates, - NameToCertificate: cfg.NameToCertificate, - GetCertificate: cfg.GetCertificate, - RootCAs: cfg.RootCAs, - NextProtos: cfg.NextProtos, - ServerName: cfg.ServerName, - ClientAuth: cfg.ClientAuth, - ClientCAs: cfg.ClientCAs, - InsecureSkipVerify: cfg.InsecureSkipVerify, - CipherSuites: cfg.CipherSuites, - PreferServerCipherSuites: cfg.PreferServerCipherSuites, - SessionTicketKey: cfg.SessionTicketKey, - ClientSessionCache: cfg.ClientSessionCache, - MinVersion: cfg.MinVersion, - MaxVersion: cfg.MaxVersion, - CurvePreferences: cfg.CurvePreferences, - } - return &ncfg +// IsClosedConnError returns true if the error is from closing listener, cmux. +// copied from golang.org/x/net/http2/http2.go +func IsClosedConnError(err error) bool { + // 'use of closed network connection' (Go <=1.8) + // 'use of closed file or network connection' (Go >1.8, internal/poll.ErrClosing) + // 'mux: listener closed' (cmux.ErrListenerClosed) + return err != nil && strings.Contains(err.Error(), "closed") } diff --git a/vendor/github.com/coreos/etcd/pkg/transport/listener_tls.go b/vendor/github.com/coreos/etcd/pkg/transport/listener_tls.go new file mode 100644 index 000000000000..86511860335a --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/transport/listener_tls.go @@ -0,0 +1,217 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "net" + "strings" + "sync" +) + +// tlsListener overrides a TLS listener so it will reject client +// certificates with insufficient SAN credentials. +type tlsListener struct { + net.Listener + connc chan net.Conn + donec chan struct{} + err error + handshakeFailure func(*tls.Conn, error) +} + +func newTLSListener(l net.Listener, tlsinfo *TLSInfo) (net.Listener, error) { + if tlsinfo == nil || tlsinfo.Empty() { + l.Close() + return nil, fmt.Errorf("cannot listen on TLS for %s: KeyFile and CertFile are not presented", l.Addr().String()) + } + tlscfg, err := tlsinfo.ServerConfig() + if err != nil { + return nil, err + } + + hf := tlsinfo.HandshakeFailure + if hf == nil { + hf = func(*tls.Conn, error) {} + } + tlsl := &tlsListener{ + Listener: tls.NewListener(l, tlscfg), + connc: make(chan net.Conn), + donec: make(chan struct{}), + handshakeFailure: hf, + } + go tlsl.acceptLoop() + return tlsl, nil +} + +func (l *tlsListener) Accept() (net.Conn, error) { + select { + case conn := <-l.connc: + return conn, nil + case <-l.donec: + return nil, l.err + } +} + +// acceptLoop launches each TLS handshake in a separate goroutine +// to prevent a hanging TLS connection from blocking other connections. +func (l *tlsListener) acceptLoop() { + var wg sync.WaitGroup + var pendingMu sync.Mutex + + pending := make(map[net.Conn]struct{}) + ctx, cancel := context.WithCancel(context.Background()) + defer func() { + cancel() + pendingMu.Lock() + for c := range pending { + c.Close() + } + pendingMu.Unlock() + wg.Wait() + close(l.donec) + }() + + for { + conn, err := l.Listener.Accept() + if err != nil { + l.err = err + return + } + + pendingMu.Lock() + pending[conn] = struct{}{} + pendingMu.Unlock() + + wg.Add(1) + go func() { + defer func() { + if conn != nil { + conn.Close() + } + wg.Done() + }() + + tlsConn := conn.(*tls.Conn) + herr := tlsConn.Handshake() + pendingMu.Lock() + delete(pending, conn) + pendingMu.Unlock() + if herr != nil { + l.handshakeFailure(tlsConn, herr) + return + } + + st := tlsConn.ConnectionState() + if len(st.PeerCertificates) > 0 { + cert := st.PeerCertificates[0] + addr := tlsConn.RemoteAddr().String() + if cerr := checkCert(ctx, cert, addr); cerr != nil { + l.handshakeFailure(tlsConn, cerr) + return + } + } + select { + case l.connc <- tlsConn: + conn = nil + case <-ctx.Done(): + } + }() + } +} + +func checkCert(ctx context.Context, cert *x509.Certificate, remoteAddr string) error { + h, _, herr := net.SplitHostPort(remoteAddr) + if len(cert.IPAddresses) == 0 && len(cert.DNSNames) == 0 { + return nil + } + if herr != nil { + return herr + } + if len(cert.IPAddresses) > 0 { + cerr := cert.VerifyHostname(h) + if cerr == nil { + return nil + } + if len(cert.DNSNames) == 0 { + return cerr + } + } + if len(cert.DNSNames) > 0 { + ok, err := isHostInDNS(ctx, h, cert.DNSNames) + if ok { + return nil + } + errStr := "" + if err != nil { + errStr = " (" + err.Error() + ")" + } + return fmt.Errorf("tls: %q does not match any of DNSNames %q"+errStr, h, cert.DNSNames) + } + return nil +} + 
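+// isHostInDNS reports whether host matches the certificate's DNS names, +// first by reverse (PTR) lookup of host against the names and wildcards, +// then by forward-resolving each name and comparing addresses.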
+func isHostInDNS(ctx context.Context, host string, dnsNames []string) (ok bool, err error) { + // reverse lookup + wildcards, names := []string{}, []string{} + for _, dns := range dnsNames { + if strings.HasPrefix(dns, "*.") { + wildcards = append(wildcards, dns[1:]) + } else { + names = append(names, dns) + } + } + lnames, lerr := net.DefaultResolver.LookupAddr(ctx, host) + for _, name := range lnames { + // strip trailing '.' from PTR record + if name[len(name)-1] == '.' { + name = name[:len(name)-1] + } + for _, wc := range wildcards { + if strings.HasSuffix(name, wc) { + return true, nil + } + } + for _, n := range names { + if n == name { + return true, nil + } + } + } + err = lerr + + // forward lookup + for _, dns := range names { + addrs, lerr := net.DefaultResolver.LookupHost(ctx, dns) + if lerr != nil { + err = lerr + continue + } + for _, addr := range addrs { + if addr == host { + return true, nil + } + } + } + return false, err +} + +func (l *tlsListener) Close() error { + err := l.Listener.Close() + <-l.donec + return err +} diff --git a/vendor/github.com/coreos/etcd/pkg/transport/timeout_listener.go b/vendor/github.com/coreos/etcd/pkg/transport/timeout_listener.go index 0f4df5fbe3b2..b35e04955bb0 100644 --- a/vendor/github.com/coreos/etcd/pkg/transport/timeout_listener.go +++ b/vendor/github.com/coreos/etcd/pkg/transport/timeout_listener.go @@ -15,7 +15,6 @@ package transport import ( - "crypto/tls" "net" "time" ) @@ -23,7 +22,7 @@ import ( // NewTimeoutListener returns a listener that listens on the given address. // If read/write on the accepted connection blocks longer than its time limit, // it will return timeout error. -func NewTimeoutListener(addr string, scheme string, tlscfg *tls.Config, rdtimeoutd, wtimeoutd time.Duration) (net.Listener, error) { +func NewTimeoutListener(addr string, scheme string, tlsinfo *TLSInfo, rdtimeoutd, wtimeoutd time.Duration) (net.Listener, error) { ln, err := newListener(addr, scheme) if err != nil { return nil, err @@ -33,7 +32,7 @@ func NewTimeoutListener(addr string, scheme string, tlscfg *tls.Config, rdtimeou rdtimeoutd: rdtimeoutd, wtimeoutd: wtimeoutd, } - if ln, err = wrapTLS(addr, scheme, tlscfg, ln); err != nil { + if ln, err = wrapTLS(addr, scheme, tlsinfo, ln); err != nil { return nil, err } return ln, nil diff --git a/vendor/github.com/coreos/etcd/pkg/transport/unix_listener.go b/vendor/github.com/coreos/etcd/pkg/transport/unix_listener.go index c126b6f7fa04..123e2036f0f0 100644 --- a/vendor/github.com/coreos/etcd/pkg/transport/unix_listener.go +++ b/vendor/github.com/coreos/etcd/pkg/transport/unix_listener.go @@ -22,7 +22,7 @@ import ( type unixListener struct{ net.Listener } func NewUnixListener(addr string) (net.Listener, error) { - if err := os.RemoveAll(addr); err != nil { + if err := os.Remove(addr); err != nil && !os.IsNotExist(err) { return nil, err } l, err := net.Listen("unix", addr) @@ -33,7 +33,7 @@ func NewUnixListener(addr string) (net.Listener, error) { } func (ul *unixListener) Close() error { - if err := os.RemoveAll(ul.Addr().String()); err != nil { + if err := os.Remove(ul.Addr().String()); err != nil && !os.IsNotExist(err) { return err } return ul.Listener.Close() diff --git a/vendor/github.com/coreos/etcd/pkg/wait/wait.go b/vendor/github.com/coreos/etcd/pkg/wait/wait.go index 0f31eeb9790b..34fa237e825b 100644 --- a/vendor/github.com/coreos/etcd/pkg/wait/wait.go +++ b/vendor/github.com/coreos/etcd/pkg/wait/wait.go @@ -21,22 +21,29 @@ import ( "sync" ) +// Wait is an interface that provides the ability to 
wait and trigger events that +// are associated with IDs. type Wait interface { + // Register waits returns a chan that waits on the given ID. + // The chan will be triggered when Trigger is called with + // the same ID. Register(id uint64) <-chan interface{} + // Trigger triggers the waiting chans with the given ID. Trigger(id uint64, x interface{}) IsRegistered(id uint64) bool } -type List struct { +type list struct { l sync.Mutex m map[uint64]chan interface{} } -func New() *List { - return &List{m: make(map[uint64]chan interface{})} +// New creates a Wait. +func New() Wait { + return &list{m: make(map[uint64]chan interface{})} } -func (w *List) Register(id uint64) <-chan interface{} { +func (w *list) Register(id uint64) <-chan interface{} { w.l.Lock() defer w.l.Unlock() ch := w.m[id] @@ -49,7 +56,7 @@ func (w *List) Register(id uint64) <-chan interface{} { return ch } -func (w *List) Trigger(id uint64, x interface{}) { +func (w *list) Trigger(id uint64, x interface{}) { w.l.Lock() ch := w.m[id] delete(w.m, id) @@ -60,7 +67,7 @@ func (w *List) Trigger(id uint64, x interface{}) { } } -func (w *List) IsRegistered(id uint64) bool { +func (w *list) IsRegistered(id uint64) bool { w.l.Lock() defer w.l.Unlock() _, ok := w.m[id] diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/chan_stream.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/chan_stream.go new file mode 100644 index 000000000000..3aa01f2052b1 --- /dev/null +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/chan_stream.go @@ -0,0 +1,165 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
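+// This file implements gRPC client and server streams on top of Go channels, +// so server stream handlers can be driven directly by in-process clients.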
+ +package adapter + +import ( + "golang.org/x/net/context" + + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" +) + +// chanServerStream implements grpc.ServerStream with a chanStream +type chanServerStream struct { + headerc chan<- metadata.MD + trailerc chan<- metadata.MD + grpc.Stream + + headers []metadata.MD +} + +func (ss *chanServerStream) SendHeader(md metadata.MD) error { + if ss.headerc == nil { + return errAlreadySentHeader + } + outmd := make(map[string][]string) + for _, h := range append(ss.headers, md) { + for k, v := range h { + outmd[k] = v + } + } + select { + case ss.headerc <- outmd: + ss.headerc = nil + ss.headers = nil + return nil + case <-ss.Context().Done(): + } + return ss.Context().Err() +} + +func (ss *chanServerStream) SetHeader(md metadata.MD) error { + if ss.headerc == nil { + return errAlreadySentHeader + } + ss.headers = append(ss.headers, md) + return nil +} + +func (ss *chanServerStream) SetTrailer(md metadata.MD) { + ss.trailerc <- md +} + +// chanClientStream implements grpc.ClientStream with a chanStream +type chanClientStream struct { + headerc <-chan metadata.MD + trailerc <-chan metadata.MD + *chanStream +} + +func (cs *chanClientStream) Header() (metadata.MD, error) { + select { + case md := <-cs.headerc: + return md, nil + case <-cs.Context().Done(): + } + return nil, cs.Context().Err() +} + +func (cs *chanClientStream) Trailer() metadata.MD { + select { + case md := <-cs.trailerc: + return md + case <-cs.Context().Done(): + return nil + } +} + +func (cs *chanClientStream) CloseSend() error { + close(cs.chanStream.sendc) + return nil +} + +// chanStream implements grpc.Stream using channels +type chanStream struct { + recvc <-chan interface{} + sendc chan<- interface{} + ctx context.Context + cancel context.CancelFunc +} + +func (s *chanStream) Context() context.Context { return s.ctx } + +func (s *chanStream) SendMsg(m interface{}) error { + select { + case s.sendc <- m: + if err, ok := m.(error); ok { + return err + } + return nil + case <-s.ctx.Done(): + } + return s.ctx.Err() +} + +func (s *chanStream) RecvMsg(m interface{}) error { + v := m.(*interface{}) + for { + select { + case msg, ok := <-s.recvc: + if !ok { + return grpc.ErrClientConnClosing + } + if err, ok := msg.(error); ok { + return err + } + *v = msg + return nil + case <-s.ctx.Done(): + } + if len(s.recvc) == 0 { + // prioritize any pending recv messages over canceled context + break + } + } + return s.ctx.Err() +} + +func newPipeStream(ctx context.Context, ssHandler func(chanServerStream) error) chanClientStream { + // ch1 is buffered so server can send error on close + ch1, ch2 := make(chan interface{}, 1), make(chan interface{}) + headerc, trailerc := make(chan metadata.MD, 1), make(chan metadata.MD, 1) + + cctx, ccancel := context.WithCancel(ctx) + cli := &chanStream{recvc: ch1, sendc: ch2, ctx: cctx, cancel: ccancel} + cs := chanClientStream{headerc, trailerc, cli} + + sctx, scancel := context.WithCancel(ctx) + srv := &chanStream{recvc: ch2, sendc: ch1, ctx: sctx, cancel: scancel} + ss := chanServerStream{headerc, trailerc, srv, nil} + + go func() { + if err := ssHandler(ss); err != nil { + select { + case srv.sendc <- err: + case <-sctx.Done(): + case <-cctx.Done(): + } + } + scancel() + ccancel() + }() + return cs +} diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/cluster_client_adapter.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/cluster_client_adapter.go new file mode 100644 index 000000000000..4ddf78e15ec1 --- /dev/null +++ 
b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/cluster_client_adapter.go @@ -0,0 +1,44 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package adapter + +import ( + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + + "golang.org/x/net/context" + "google.golang.org/grpc" +) + +type cls2clc struct{ cls pb.ClusterServer } + +func ClusterServerToClusterClient(cls pb.ClusterServer) pb.ClusterClient { + return &cls2clc{cls} +} + +func (s *cls2clc) MemberList(ctx context.Context, r *pb.MemberListRequest, opts ...grpc.CallOption) (*pb.MemberListResponse, error) { + return s.cls.MemberList(ctx, r) +} + +func (s *cls2clc) MemberAdd(ctx context.Context, r *pb.MemberAddRequest, opts ...grpc.CallOption) (*pb.MemberAddResponse, error) { + return s.cls.MemberAdd(ctx, r) +} + +func (s *cls2clc) MemberUpdate(ctx context.Context, r *pb.MemberUpdateRequest, opts ...grpc.CallOption) (*pb.MemberUpdateResponse, error) { + return s.cls.MemberUpdate(ctx, r) +} + +func (s *cls2clc) MemberRemove(ctx context.Context, r *pb.MemberRemoveRequest, opts ...grpc.CallOption) (*pb.MemberRemoveResponse, error) { + return s.cls.MemberRemove(ctx, r) +} diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/doc.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/doc.go new file mode 100644 index 000000000000..7170be233046 --- /dev/null +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/doc.go @@ -0,0 +1,17 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package adapter provides gRPC adapters between client and server +// gRPC interfaces without needing to go through a gRPC connection. +package adapter diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/election_client_adapter.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/election_client_adapter.go new file mode 100644 index 000000000000..383c1b9d8fb4 --- /dev/null +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/election_client_adapter.go @@ -0,0 +1,79 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package adapter + +import ( + "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb" + + "golang.org/x/net/context" + "google.golang.org/grpc" +) + +type es2ec struct{ es v3electionpb.ElectionServer } + +func ElectionServerToElectionClient(es v3electionpb.ElectionServer) v3electionpb.ElectionClient { + return &es2ec{es} +} + +func (s *es2ec) Campaign(ctx context.Context, r *v3electionpb.CampaignRequest, opts ...grpc.CallOption) (*v3electionpb.CampaignResponse, error) { + return s.es.Campaign(ctx, r) +} + +func (s *es2ec) Proclaim(ctx context.Context, r *v3electionpb.ProclaimRequest, opts ...grpc.CallOption) (*v3electionpb.ProclaimResponse, error) { + return s.es.Proclaim(ctx, r) +} + +func (s *es2ec) Leader(ctx context.Context, r *v3electionpb.LeaderRequest, opts ...grpc.CallOption) (*v3electionpb.LeaderResponse, error) { + return s.es.Leader(ctx, r) +} + +func (s *es2ec) Resign(ctx context.Context, r *v3electionpb.ResignRequest, opts ...grpc.CallOption) (*v3electionpb.ResignResponse, error) { + return s.es.Resign(ctx, r) +} + +func (s *es2ec) Observe(ctx context.Context, in *v3electionpb.LeaderRequest, opts ...grpc.CallOption) (v3electionpb.Election_ObserveClient, error) { + cs := newPipeStream(ctx, func(ss chanServerStream) error { + return s.es.Observe(in, &es2ecServerStream{ss}) + }) + return &es2ecClientStream{cs}, nil +} + +// es2ecClientStream implements Election_ObserveClient +type es2ecClientStream struct{ chanClientStream } + +// es2ecServerStream implements Election_ObserveServer +type es2ecServerStream struct{ chanServerStream } + +func (s *es2ecClientStream) Send(rr *v3electionpb.LeaderRequest) error { + return s.SendMsg(rr) +} +func (s *es2ecClientStream) Recv() (*v3electionpb.LeaderResponse, error) { + var v interface{} + if err := s.RecvMsg(&v); err != nil { + return nil, err + } + return v.(*v3electionpb.LeaderResponse), nil +} + +func (s *es2ecServerStream) Send(rr *v3electionpb.LeaderResponse) error { + return s.SendMsg(rr) +} +func (s *es2ecServerStream) Recv() (*v3electionpb.LeaderRequest, error) { + var v interface{} + if err := s.RecvMsg(&v); err != nil { + return nil, err + } + return v.(*v3electionpb.LeaderRequest), nil +} diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/kv_client_adapter.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/kv_client_adapter.go new file mode 100644 index 000000000000..fec401d9dd0e --- /dev/null +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/kv_client_adapter.go @@ -0,0 +1,47 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package adapter + +import ( + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +type kvs2kvc struct{ kvs pb.KVServer } + +func KvServerToKvClient(kvs pb.KVServer) pb.KVClient { + return &kvs2kvc{kvs} +} + +func (s *kvs2kvc) Range(ctx context.Context, in *pb.RangeRequest, opts ...grpc.CallOption) (*pb.RangeResponse, error) { + return s.kvs.Range(ctx, in) +} + +func (s *kvs2kvc) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (*pb.PutResponse, error) { + return s.kvs.Put(ctx, in) +} + +func (s *kvs2kvc) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (*pb.DeleteRangeResponse, error) { + return s.kvs.DeleteRange(ctx, in) +} + +func (s *kvs2kvc) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (*pb.TxnResponse, error) { + return s.kvs.Txn(ctx, in) +} + +func (s *kvs2kvc) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (*pb.CompactionResponse, error) { + return s.kvs.Compact(ctx, in) +} diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/lease_client_adapter.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/lease_client_adapter.go new file mode 100644 index 000000000000..d471fd9144be --- /dev/null +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/lease_client_adapter.go @@ -0,0 +1,77 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package adapter + +import ( + "golang.org/x/net/context" + + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "google.golang.org/grpc" +) + +type ls2lc struct { + leaseServer pb.LeaseServer +} + +func LeaseServerToLeaseClient(ls pb.LeaseServer) pb.LeaseClient { + return &ls2lc{ls} +} + +func (c *ls2lc) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (*pb.LeaseGrantResponse, error) { + return c.leaseServer.LeaseGrant(ctx, in) +} + +func (c *ls2lc) LeaseRevoke(ctx context.Context, in *pb.LeaseRevokeRequest, opts ...grpc.CallOption) (*pb.LeaseRevokeResponse, error) { + return c.leaseServer.LeaseRevoke(ctx, in) +} + +func (c *ls2lc) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (pb.Lease_LeaseKeepAliveClient, error) { + cs := newPipeStream(ctx, func(ss chanServerStream) error { + return c.leaseServer.LeaseKeepAlive(&ls2lcServerStream{ss}) + }) + return &ls2lcClientStream{cs}, nil +} + +func (c *ls2lc) LeaseTimeToLive(ctx context.Context, in *pb.LeaseTimeToLiveRequest, opts ...grpc.CallOption) (*pb.LeaseTimeToLiveResponse, error) { + return c.leaseServer.LeaseTimeToLive(ctx, in) +} + +// ls2lcClientStream implements Lease_LeaseKeepAliveClient +type ls2lcClientStream struct{ chanClientStream } + +// ls2lcServerStream implements Lease_LeaseKeepAliveServer +type ls2lcServerStream struct{ chanServerStream } + +func (s *ls2lcClientStream) Send(rr *pb.LeaseKeepAliveRequest) error { + return s.SendMsg(rr) +} +func (s *ls2lcClientStream) Recv() (*pb.LeaseKeepAliveResponse, error) { + var v interface{} + if err := s.RecvMsg(&v); err != nil { + return nil, err + } + return v.(*pb.LeaseKeepAliveResponse), nil +} + +func (s *ls2lcServerStream) Send(rr *pb.LeaseKeepAliveResponse) error { + return s.SendMsg(rr) +} +func (s *ls2lcServerStream) Recv() (*pb.LeaseKeepAliveRequest, error) { + var v interface{} + if err := s.RecvMsg(&v); err != nil { + return nil, err + } + return v.(*pb.LeaseKeepAliveRequest), nil +} diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/lock_client_adapter.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/lock_client_adapter.go new file mode 100644 index 000000000000..05e5cb020a16 --- /dev/null +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/lock_client_adapter.go @@ -0,0 +1,36 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package adapter + +import ( + "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb" + + "golang.org/x/net/context" + "google.golang.org/grpc" +) + +type ls2lsc struct{ ls v3lockpb.LockServer } + +func LockServerToLockClient(ls v3lockpb.LockServer) v3lockpb.LockClient { + return &ls2lsc{ls} +} + +func (s *ls2lsc) Lock(ctx context.Context, r *v3lockpb.LockRequest, opts ...grpc.CallOption) (*v3lockpb.LockResponse, error) { + return s.ls.Lock(ctx, r) +} + +func (s *ls2lsc) Unlock(ctx context.Context, r *v3lockpb.UnlockRequest, opts ...grpc.CallOption) (*v3lockpb.UnlockResponse, error) { + return s.ls.Unlock(ctx, r) +} diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/maintenance_client_adapter.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/maintenance_client_adapter.go new file mode 100644 index 000000000000..9b21bf2576e2 --- /dev/null +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/maintenance_client_adapter.go @@ -0,0 +1,79 @@ +// Copyright 2017 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package adapter + +import ( + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + + "golang.org/x/net/context" + "google.golang.org/grpc" +) + +type mts2mtc struct{ mts pb.MaintenanceServer } + +func MaintenanceServerToMaintenanceClient(mts pb.MaintenanceServer) pb.MaintenanceClient { + return &mts2mtc{mts} +} + +func (s *mts2mtc) Alarm(ctx context.Context, r *pb.AlarmRequest, opts ...grpc.CallOption) (*pb.AlarmResponse, error) { + return s.mts.Alarm(ctx, r) +} + +func (s *mts2mtc) Status(ctx context.Context, r *pb.StatusRequest, opts ...grpc.CallOption) (*pb.StatusResponse, error) { + return s.mts.Status(ctx, r) +} + +func (s *mts2mtc) Defragment(ctx context.Context, dr *pb.DefragmentRequest, opts ...grpc.CallOption) (*pb.DefragmentResponse, error) { + return s.mts.Defragment(ctx, dr) +} + +func (s *mts2mtc) Hash(ctx context.Context, r *pb.HashRequest, opts ...grpc.CallOption) (*pb.HashResponse, error) { + return s.mts.Hash(ctx, r) +} + +func (s *mts2mtc) Snapshot(ctx context.Context, in *pb.SnapshotRequest, opts ...grpc.CallOption) (pb.Maintenance_SnapshotClient, error) { + cs := newPipeStream(ctx, func(ss chanServerStream) error { + return s.mts.Snapshot(in, &ss2scServerStream{ss}) + }) + return &ss2scClientStream{cs}, nil +} + +// ss2scClientStream implements Maintenance_SnapshotClient +type ss2scClientStream struct{ chanClientStream } + +// ss2scServerStream implements Maintenance_SnapshotServer +type ss2scServerStream struct{ chanServerStream } + +func (s *ss2scClientStream) Send(rr *pb.SnapshotRequest) error { + return s.SendMsg(rr) +} +func (s *ss2scClientStream) Recv() (*pb.SnapshotResponse, error) { + var v interface{} + if err := s.RecvMsg(&v); err != nil { + return nil, err + } + return v.(*pb.SnapshotResponse), nil +} + +func (s *ss2scServerStream) Send(rr *pb.SnapshotResponse) error { + return s.SendMsg(rr) +} +func (s *ss2scServerStream) Recv() (*pb.SnapshotRequest, error) { + var v interface{} + if err := 
s.RecvMsg(&v); err != nil { + return nil, err + } + return v.(*pb.SnapshotRequest), nil +} diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/watch_client_adapter.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/watch_client_adapter.go new file mode 100644 index 000000000000..af4a13c41520 --- /dev/null +++ b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/watch_client_adapter.go @@ -0,0 +1,66 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package adapter + +import ( + "errors" + + pb "github.com/coreos/etcd/etcdserver/etcdserverpb" + "golang.org/x/net/context" + "google.golang.org/grpc" +) + +var errAlreadySentHeader = errors.New("adapter: already sent header") + +type ws2wc struct{ wserv pb.WatchServer } + +func WatchServerToWatchClient(wserv pb.WatchServer) pb.WatchClient { + return &ws2wc{wserv} +} + +func (s *ws2wc) Watch(ctx context.Context, opts ...grpc.CallOption) (pb.Watch_WatchClient, error) { + cs := newPipeStream(ctx, func(ss chanServerStream) error { + return s.wserv.Watch(&ws2wcServerStream{ss}) + }) + return &ws2wcClientStream{cs}, nil +} + +// ws2wcClientStream implements Watch_WatchClient +type ws2wcClientStream struct{ chanClientStream } + +// ws2wcServerStream implements Watch_WatchServer +type ws2wcServerStream struct{ chanServerStream } + +func (s *ws2wcClientStream) Send(wr *pb.WatchRequest) error { + return s.SendMsg(wr) +} +func (s *ws2wcClientStream) Recv() (*pb.WatchResponse, error) { + var v interface{} + if err := s.RecvMsg(&v); err != nil { + return nil, err + } + return v.(*pb.WatchResponse), nil +} + +func (s *ws2wcServerStream) Send(wr *pb.WatchResponse) error { + return s.SendMsg(wr) +} +func (s *ws2wcServerStream) Recv() (*pb.WatchRequest, error) { + var v interface{} + if err := s.RecvMsg(&v); err != nil { + return nil, err + } + return v.(*pb.WatchRequest), nil +} diff --git a/vendor/github.com/coreos/etcd/raft/README.md b/vendor/github.com/coreos/etcd/raft/README.md index a724b9585790..f485b839771b 100644 --- a/vendor/github.com/coreos/etcd/raft/README.md +++ b/vendor/github.com/coreos/etcd/raft/README.md @@ -13,9 +13,7 @@ To keep the codebase small as well as provide flexibility, the library only impl In order to easily test the Raft library, its behavior should be deterministic. To achieve this determinism, the library models Raft as a state machine. The state machine takes a `Message` as input. A message can either be a local timer update or a network message sent from a remote peer. The state machine's output is a 3-tuple `{[]Messages, []LogEntries, NextState}` consisting of an array of `Messages`, `log entries`, and `Raft state changes`. For state machines with the same state, the same state machine input should always generate the same state machine output. 
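To make the 3-tuple concrete, here is a minimal sketch of one input/output cycle through the Node API. This is illustrative only and not part of this diff: the helper name `stepOnce`, and the assumption that an output batch is immediately available on the Ready channel, are ours.

```go
// stepOnce feeds a single input Message into the state machine and collects
// the resulting output batch. In real code the Ready channel is drained in a
// dedicated loop (shown later in this README), and rd must be fully processed
// (entries persisted, messages sent) before Advance is called.
func stepOnce(ctx context.Context, n raft.Node, msg raftpb.Message) raft.Ready {
	if err := n.Step(ctx, msg); err != nil { // input: one Message
		panic(err)
	}
	rd := <-n.Ready() // output: Messages, Entries, and any state changes
	n.Advance()       // acknowledge the batch so the next one can be produced
	return rd
}
```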
-A simple example application, _raftexample_, is also available to help illustrate -how to use this package in practice: -https://github.com/coreos/etcd/tree/master/contrib/raftexample +A simple example application, _raftexample_, is also available to help illustrate how to use this package in practice: https://github.com/coreos/etcd/tree/master/contrib/raftexample # Features @@ -51,11 +49,11 @@ This raft implementation also includes a few optional enhancements: - [etcd](https://github.com/coreos/etcd) A distributed reliable key-value store - [tikv](https://github.com/pingcap/tikv) A Distributed transactional key value database powered by Rust and Raft - [swarmkit](https://github.com/docker/swarmkit) A toolkit for orchestrating distributed systems at any scale. +- [chain core](https://github.com/chain/chain) Software for operating permissioned, multi-asset blockchain networks ## Usage -The primary object in raft is a Node. You either start a Node from scratch -using raft.StartNode or start a Node from some initial state using raft.RestartNode. +The primary object in raft is a Node. Either start a Node from scratch using raft.StartNode or start a Node from some initial state using raft.RestartNode. To start a three-node cluster ```go @@ -73,7 +71,7 @@ To start a three-node cluster n := raft.StartNode(c, []raft.Peer{{ID: 0x02}, {ID: 0x03}}) ``` -You can start a single node cluster, like so: +Start a single node cluster, like so: ```go // Create storage and config as shown above. // Set peer list to itself, so this node can become the leader of this single-node cluster. @@ -81,7 +79,7 @@ You can start a single node cluster, like so: n := raft.StartNode(c, peers) ``` -To allow a new node to join this cluster, do not pass in any peers. First, you need add the node to the existing cluster by calling `ProposeConfChange` on any existing node inside the cluster. Then, you can start the node with empty peer list, like so: +To allow a new node to join this cluster, do not pass in any peers. First, add the node to the existing cluster by calling `ProposeConfChange` on any existing node inside the cluster. Then, start the node with an empty peer list, like so: ```go // Create storage and config as shown above. n := raft.StartNode(c, nil) @@ -110,46 +108,21 @@ To restart a node from previous state: n := raft.RestartNode(c) ``` -Now that you are holding onto a Node you have a few responsibilities: - -First, you must read from the Node.Ready() channel and process the updates -it contains. These steps may be performed in parallel, except as noted in step -2. - -1. Write HardState, Entries, and Snapshot to persistent storage if they are -not empty. Note that when writing an Entry with Index i, any -previously-persisted entries with Index >= i must be discarded. - -2. Send all Messages to the nodes named in the To field. It is important that -no messages be sent until the latest HardState has been persisted to disk, -and all Entries written by any previous Ready batch (Messages may be sent while -entries from the same batch are being persisted). To reduce the I/O latency, an -optimization can be applied to make leader write to disk in parallel with its -followers (as explained at section 10.2.1 in Raft thesis). If any Message has type -MsgSnap, call Node.ReportSnapshot() after it has been sent (these messages may be -large). Note: Marshalling messages is not thread-safe; it is important that you -make sure that no new entries are persisted while marshalling. 
-The easiest way to achieve this is to serialise the messages directly inside -your main raft loop. - -3. Apply Snapshot (if any) and CommittedEntries to the state machine. -If any committed Entry has Type EntryConfChange, call Node.ApplyConfChange() -to apply it to the node. The configuration change may be cancelled at this point -by setting the NodeID field to zero before calling ApplyConfChange -(but ApplyConfChange must be called one way or the other, and the decision to cancel -must be based solely on the state machine and not external information such as -the observed health of the node). - -4. Call Node.Advance() to signal readiness for the next batch of updates. -This may be done at any time after step 1, although all updates must be processed -in the order they were returned by Ready. - -Second, all persisted log entries must be made available via an -implementation of the Storage interface. The provided MemoryStorage -type can be used for this (if you repopulate its state upon a -restart), or you can supply your own disk-backed implementation. - -Third, when you receive a message from another node, pass it to Node.Step: +After creating a Node, the user has a few responsibilities: + +First, read from the Node.Ready() channel and process the updates it contains. These steps may be performed in parallel, except as noted in step 2. + +1. Write HardState, Entries, and Snapshot to persistent storage if they are not empty. Note that when writing an Entry with Index i, any previously-persisted entries with Index >= i must be discarded. + +2. Send all Messages to the nodes named in the To field. It is important that no messages be sent until the latest HardState has been persisted to disk, and all Entries written by any previous Ready batch (Messages may be sent while entries from the same batch are being persisted). To reduce the I/O latency, an optimization can be applied to let the leader write to disk in parallel with its followers (as explained in section 10.2.1 of the Raft thesis). If any Message has type MsgSnap, call Node.ReportSnapshot() after it has been sent (these messages may be large). Note: Marshalling messages is not thread-safe; it is important to make sure that no new entries are persisted while marshalling. The easiest way to achieve this is to serialise the messages directly inside the main raft loop. + +3. Apply Snapshot (if any) and CommittedEntries to the state machine. If any committed Entry has Type EntryConfChange, call Node.ApplyConfChange() to apply it to the node. The configuration change may be cancelled at this point by setting the NodeID field to zero before calling ApplyConfChange (but ApplyConfChange must be called one way or the other, and the decision to cancel must be based solely on the state machine and not external information such as the observed health of the node). + +4. Call Node.Advance() to signal readiness for the next batch of updates. This may be done at any time after step 1, although all updates must be processed in the order they were returned by Ready. + +Second, all persisted log entries must be made available via an implementation of the Storage interface. The provided MemoryStorage type can be used for this (if repopulating its state upon a restart), or a custom disk-backed implementation can be supplied.
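For reference, the Storage contract mentioned above has roughly this shape. This is a sketch following the vendored raft/storage.go; `pb` aliases the raftpb package and the trailing comments are ours, so treat the vendored source as authoritative.

```go
// Storage is how raft reads back previously persisted state; implementations
// must return the data the application saved while processing Ready batches.
type Storage interface {
	InitialState() (pb.HardState, pb.ConfState, error)  // saved HardState plus membership
	Entries(lo, hi, maxSize uint64) ([]pb.Entry, error) // log entries in [lo, hi)
	Term(i uint64) (uint64, error)                      // term of the entry at index i
	LastIndex() (uint64, error)                         // index of the newest entry
	FirstIndex() (uint64, error)                        // index of the oldest available entry
	Snapshot() (pb.Snapshot, error)                     // most recent snapshot
}
```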
+ +Third, after receiving a message from another node, pass it to Node.Step: ```go func recvRaftRPC(ctx context.Context, m raftpb.Message) { @@ -157,10 +130,7 @@ Third, when you receive a message from another node, pass it to Node.Step: } ``` -Finally, you need to call `Node.Tick()` at regular intervals (probably -via a `time.Ticker`). Raft has two important timeouts: heartbeat and the -election timeout. However, internally to the raft package time is -represented by an abstract "tick". +Finally, call `Node.Tick()` at regular intervals (probably via a `time.Ticker`). Raft has two important timeouts: heartbeat and the election timeout. However, internally to the raft package time is represented by an abstract "tick". The total state machine handling loop will look something like this: @@ -190,16 +160,13 @@ The total state machine handling loop will look something like this: } ``` -To propose changes to the state machine from your node take your application -data, serialize it into a byte slice and call: +To propose changes to the state machine from the node, take application data, serialize it into a byte slice, and call: ```go n.Propose(ctx, data) ``` -If the proposal is committed, data will appear in committed entries with type -raftpb.EntryNormal. There is no guarantee that a proposed command will be -committed; you may have to re-propose after a timeout. +If the proposal is committed, data will appear in committed entries with type raftpb.EntryNormal. There is no guarantee that a proposed command will be committed; the command may have to be reproposed after a timeout. To add or remove node in a cluster, build ConfChange struct 'cc' and call: @@ -207,8 +174,7 @@ To add or remove node in a cluster, build ConfChange struct 'cc' and call: n.ProposeConfChange(ctx, cc) ``` -After config change is committed, some committed entry with type -raftpb.EntryConfChange will be returned. You must apply it to node through: +After a config change is committed, some committed entry with type raftpb.EntryConfChange will be returned. This must be applied to the node through: ```go var cc raftpb.ConfChange @@ -223,25 +189,8 @@ may be reused. Node IDs must be non-zero. ## Implementation notes -This implementation is up to date with the final Raft thesis -(https://ramcloud.stanford.edu/~ongaro/thesis.pdf), although our -implementation of the membership change protocol differs somewhat from -that described in chapter 4. The key invariant that membership changes -happen one node at a time is preserved, but in our implementation the -membership change takes effect when its entry is applied, not when it -is added to the log (so the entry is committed under the old -membership instead of the new). This is equivalent in terms of safety, -since the old and new configurations are guaranteed to overlap. - -To ensure that we do not attempt to commit two membership changes at -once by matching log positions (which would be unsafe since they -should have different quorum requirements), we simply disallow any -proposed membership change while any uncommitted change appears in -the leader's log. - -This approach introduces a problem when you try to remove a member -from a two-member cluster: If one of the members dies before the -other one receives the commit of the confchange entry, then the member -cannot be removed any more since the cluster cannot make progress. -For this reason it is highly recommended to use three or more nodes in -every cluster.
+This implementation is up to date with the final Raft thesis (https://ramcloud.stanford.edu/~ongaro/thesis.pdf), although this implementation of the membership change protocol differs somewhat from that described in chapter 4. The key invariant that membership changes happen one node at a time is preserved, but in our implementation the membership change takes effect when its entry is applied, not when it is added to the log (so the entry is committed under the old membership instead of the new). This is equivalent in terms of safety, since the old and new configurations are guaranteed to overlap. + +To ensure there is no attempt to commit two membership changes at once by matching log positions (which would be unsafe since they should have different quorum requirements), any proposed membership change is simply disallowed while any uncommitted change appears in the leader's log. + +This approach introduces a problem when removing a member from a two-member cluster: If one of the members dies before the other one receives the commit of the confchange entry, then the member cannot be removed any more since the cluster cannot make progress. For this reason it is highly recommended to use three or more nodes in every cluster. diff --git a/vendor/github.com/coreos/etcd/raft/log_unstable.go b/vendor/github.com/coreos/etcd/raft/log_unstable.go index 8ae301c3d8dd..263af9ce405e 100644 --- a/vendor/github.com/coreos/etcd/raft/log_unstable.go +++ b/vendor/github.com/coreos/etcd/raft/log_unstable.go @@ -85,6 +85,26 @@ func (u *unstable) stableTo(i, t uint64) { if gt == t && i >= u.offset { u.entries = u.entries[i+1-u.offset:] u.offset = i + 1 + u.shrinkEntriesArray() + } +} + +// shrinkEntriesArray discards the underlying array used by the entries slice +// if most of it isn't being used. This avoids holding references to a bunch of +// potentially large entries that aren't needed anymore. Simply clearing the +// entries wouldn't be safe because clients might still be using them. +func (u *unstable) shrinkEntriesArray() { + // We replace the array if we're using less than half of the space in + // it. This number is fairly arbitrary, chosen as an attempt to balance + // memory usage vs number of allocations. It could probably be improved + // with some focused tuning. + const lenMultiple = 2 + if len(u.entries) == 0 { + u.entries = nil + } else if len(u.entries)*lenMultiple < cap(u.entries) { + newEntries := make([]pb.Entry, len(u.entries)) + copy(newEntries, u.entries) + u.entries = newEntries } } diff --git a/vendor/github.com/coreos/etcd/raft/node.go b/vendor/github.com/coreos/etcd/raft/node.go index c8410fdc77fa..5da1c1193b27 100644 --- a/vendor/github.com/coreos/etcd/raft/node.go +++ b/vendor/github.com/coreos/etcd/raft/node.go @@ -83,6 +83,10 @@ type Ready struct { // If it contains a MsgSnap message, the application MUST report back to raft // when the snapshot has been received or has failed by calling ReportSnapshot. Messages []pb.Message + + // MustSync indicates whether the HardState and Entries must be synchronously + // written to disk or if an asynchronous write is permissible. 
+ MustSync bool } func isHardStateEqual(a, b pb.HardState) bool { @@ -517,5 +521,17 @@ func newReady(r *raft, prevSoftSt *SoftState, prevHardSt pb.HardState) Ready { if len(r.readStates) != 0 { rd.ReadStates = r.readStates } + rd.MustSync = MustSync(rd.HardState, prevHardSt, len(rd.Entries)) return rd } + +// MustSync returns true if the hard state and count of Raft entries indicate +// that a synchronous write to persistent storage is required. +func MustSync(st, prevst pb.HardState, entsnum int) bool { + // Persistent state on all servers: + // (Updated on stable storage before responding to RPCs) + // currentTerm + // votedFor + // log entries[] + return entsnum != 0 || st.Vote != prevst.Vote || st.Term != prevst.Term +} diff --git a/vendor/github.com/coreos/etcd/raft/raft.go b/vendor/github.com/coreos/etcd/raft/raft.go index 7be4407ee2bb..29f203982032 100644 --- a/vendor/github.com/coreos/etcd/raft/raft.go +++ b/vendor/github.com/coreos/etcd/raft/raft.go @@ -1159,6 +1159,10 @@ func (r *raft) addNode(id uint64) { } r.setProgress(id, 0, r.raftLog.lastIndex()+1) + // When a node is first added, we should mark it as recently active. + // Otherwise, CheckQuorum may cause us to step down if it is invoked + // before the added node has a chance to communicate with us. + r.prs[id].RecentActive = true } func (r *raft) removeNode(id uint64) { diff --git a/vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go b/vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go index 86ad31207087..4c6e79d58a0c 100644 --- a/vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go +++ b/vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go @@ -1558,25 +1558,67 @@ func (m *ConfState) Unmarshal(dAtA []byte) error { } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType) - } - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + m.Nodes = append(m.Nodes, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + packedLen + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - v |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Nodes = append(m.Nodes, v) } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType) } - m.Nodes = append(m.Nodes, v) default: iNdEx = preIndex skippy, err := skipRaft(dAtA[iNdEx:]) @@ -1847,7 +1889,7 @@ func init() { proto.RegisterFile("raft.proto", fileDescriptorRaft) } var fileDescriptorRaft = []byte{ // 790 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x64, 0x54, 0xcd, 0x6e, 0xdb, 0x46, + 0x1f, 
0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x54, 0xcd, 0x6e, 0xdb, 0x46, 0x10, 0x16, 0x29, 0xea, 0x6f, 0x28, 0xcb, 0xab, 0xb5, 0x5a, 0x2c, 0x0c, 0x43, 0x55, 0x85, 0x1e, 0x04, 0x17, 0x76, 0x5b, 0x1d, 0x7a, 0xe8, 0xcd, 0x96, 0x0a, 0x58, 0x40, 0x65, 0xb8, 0xb2, 0xdc, 0x43, 0x83, 0x20, 0x58, 0x8b, 0x2b, 0x4a, 0x89, 0xc9, 0x25, 0x96, 0x2b, 0xc7, 0xbe, 0x04, 0x79, diff --git a/vendor/github.com/coreos/etcd/rafthttp/pipeline.go b/vendor/github.com/coreos/etcd/rafthttp/pipeline.go index ccd9eb78698d..d9f07c3479d9 100644 --- a/vendor/github.com/coreos/etcd/rafthttp/pipeline.go +++ b/vendor/github.com/coreos/etcd/rafthttp/pipeline.go @@ -16,13 +16,13 @@ package rafthttp import ( "bytes" + "context" "errors" "io/ioutil" "sync" "time" "github.com/coreos/etcd/etcdserver/stats" - "github.com/coreos/etcd/pkg/httputil" "github.com/coreos/etcd/pkg/pbutil" "github.com/coreos/etcd/pkg/types" "github.com/coreos/etcd/raft" @@ -118,7 +118,8 @@ func (p *pipeline) post(data []byte) (err error) { req := createPostRequest(u, RaftPrefix, bytes.NewBuffer(data), "application/protobuf", p.tr.URLs, p.tr.ID, p.tr.ClusterID) done := make(chan struct{}, 1) - cancel := httputil.RequestCanceler(req) + ctx, cancel := context.WithCancel(context.Background()) + req = req.WithContext(ctx) go func() { select { case <-done: diff --git a/vendor/github.com/coreos/etcd/rafthttp/snapshot_sender.go b/vendor/github.com/coreos/etcd/rafthttp/snapshot_sender.go index 105b330728e8..52273c9d195e 100644 --- a/vendor/github.com/coreos/etcd/rafthttp/snapshot_sender.go +++ b/vendor/github.com/coreos/etcd/rafthttp/snapshot_sender.go @@ -16,6 +16,7 @@ package rafthttp import ( "bytes" + "context" "io" "io/ioutil" "net/http" @@ -104,7 +105,9 @@ func (s *snapshotSender) send(merged snap.Message) { // post posts the given request. // It returns nil when request is sent out and processed successfully. 
func (s *snapshotSender) post(req *http.Request) (err error) { - cancel := httputil.RequestCanceler(req) + ctx, cancel := context.WithCancel(context.Background()) + req = req.WithContext(ctx) + defer cancel() type responseAndError struct { resp *http.Response @@ -130,7 +133,6 @@ func (s *snapshotSender) post(req *http.Request) (err error) { select { case <-s.stopc: - cancel() return errStopped case r := <-result: if r.err != nil { diff --git a/vendor/github.com/coreos/etcd/rafthttp/stream.go b/vendor/github.com/coreos/etcd/rafthttp/stream.go index e69a44ff65a1..2a6c620f56dc 100644 --- a/vendor/github.com/coreos/etcd/rafthttp/stream.go +++ b/vendor/github.com/coreos/etcd/rafthttp/stream.go @@ -15,10 +15,10 @@ package rafthttp import ( + "context" "fmt" "io" "io/ioutil" - "net" "net/http" "path" "strings" @@ -27,6 +27,7 @@ import ( "github.com/coreos/etcd/etcdserver/stats" "github.com/coreos/etcd/pkg/httputil" + "github.com/coreos/etcd/pkg/transport" "github.com/coreos/etcd/pkg/types" "github.com/coreos/etcd/raft/raftpb" "github.com/coreos/etcd/version" @@ -51,6 +52,7 @@ var ( "2.3.0": {streamTypeMsgAppV2, streamTypeMessage}, "3.0.0": {streamTypeMsgAppV2, streamTypeMessage}, "3.1.0": {streamTypeMsgAppV2, streamTypeMessage}, + "3.2.0": {streamTypeMsgAppV2, streamTypeMessage}, } ) @@ -140,7 +142,8 @@ func (cw *streamWriter) run() { flusher http.Flusher batched int ) - tickc := time.Tick(ConnReadTimeout / 3) + tickc := time.NewTicker(ConnReadTimeout / 3) + defer tickc.Stop() unflushed := 0 plog.Infof("started streaming with peer %s (writer)", cw.peerID) @@ -212,7 +215,7 @@ func (cw *streamWriter) run() { plog.Warningf("closed an existing TCP streaming connection with peer %s (%s writer)", cw.peerID, t) } plog.Infof("established a TCP streaming connection with peer %s (%s writer)", cw.peerID, t) - heartbeatc, msgc = tickc, cw.msgc + heartbeatc, msgc = tickc.C, cw.msgc case <-cw.stopc: if cw.close() { plog.Infof("closed the TCP streaming connection with peer %s (%s writer)", cw.peerID, t) @@ -314,7 +317,7 @@ func (cr *streamReader) run() { // all data is read out case err == io.EOF: // connection is closed by the remote - case isClosedConnectionError(err): + case transport.IsClosedConnError(err): default: cr.status.deactivate(failureType{source: t.String(), action: "read"}, err.Error()) } @@ -426,14 +429,17 @@ func (cr *streamReader) dial(t streamType) (io.ReadCloser, error) { setPeerURLsHeader(req, cr.tr.URLs) + ctx, cancel := context.WithCancel(context.Background()) + req = req.WithContext(ctx) + cr.mu.Lock() + cr.cancel = cancel select { case <-cr.stopc: cr.mu.Unlock() return nil, fmt.Errorf("stream reader is stopped") default: } - cr.cancel = httputil.RequestCanceler(req) cr.mu.Unlock() resp, err := cr.tr.streamRt.RoundTrip(req) @@ -508,11 +514,6 @@ func (cr *streamReader) resume() { cr.paused = false } -func isClosedConnectionError(err error) bool { - operr, ok := err.(*net.OpError) - return ok && operr.Err.Error() == "use of closed network connection" -} - // checkStreamSupport checks whether the stream type is supported in the // given version. 
func checkStreamSupport(v *semver.Version, t streamType) bool { diff --git a/vendor/github.com/coreos/etcd/rafthttp/util.go b/vendor/github.com/coreos/etcd/rafthttp/util.go index 61855c52a60c..12e548c77175 100644 --- a/vendor/github.com/coreos/etcd/rafthttp/util.go +++ b/vendor/github.com/coreos/etcd/rafthttp/util.go @@ -15,8 +15,6 @@ package rafthttp import ( - "crypto/tls" - "encoding/binary" "fmt" "io" "net" @@ -27,7 +25,6 @@ import ( "github.com/coreos/etcd/pkg/transport" "github.com/coreos/etcd/pkg/types" - "github.com/coreos/etcd/raft/raftpb" "github.com/coreos/etcd/version" "github.com/coreos/go-semver/semver" ) @@ -39,8 +36,8 @@ var ( // NewListener returns a listener for raft message transfer between peers. // It uses timeout listener to identify broken streams promptly. -func NewListener(u url.URL, tlscfg *tls.Config) (net.Listener, error) { - return transport.NewTimeoutListener(u.Host, u.Scheme, tlscfg, ConnReadTimeout, ConnWriteTimeout) +func NewListener(u url.URL, tlsinfo *transport.TLSInfo) (net.Listener, error) { + return transport.NewTimeoutListener(u.Host, u.Scheme, tlsinfo, ConnReadTimeout, ConnWriteTimeout) } // NewRoundTripper returns a roundTripper used to send requests @@ -61,31 +58,6 @@ func newStreamRoundTripper(tlsInfo transport.TLSInfo, dialTimeout time.Duration) return transport.NewTimeoutTransport(tlsInfo, dialTimeout, ConnReadTimeout, ConnWriteTimeout) } -func writeEntryTo(w io.Writer, ent *raftpb.Entry) error { - size := ent.Size() - if err := binary.Write(w, binary.BigEndian, uint64(size)); err != nil { - return err - } - b, err := ent.Marshal() - if err != nil { - return err - } - _, err = w.Write(b) - return err -} - -func readEntryFrom(r io.Reader, ent *raftpb.Entry) error { - var l uint64 - if err := binary.Read(r, binary.BigEndian, &l); err != nil { - return err - } - buf := make([]byte, int(l)) - if _, err := io.ReadFull(r, buf); err != nil { - return err - } - return ent.Unmarshal(buf) -} - // createPostRequest creates a HTTP POST request that sends raft message. func createPostRequest(u url.URL, path string, body io.Reader, ct string, urls types.URLs, from, cid types.ID) *http.Request { uu := u diff --git a/vendor/github.com/coreos/etcd/snap/db.go b/vendor/github.com/coreos/etcd/snap/db.go index ae3c743f80c1..01d897ae8611 100644 --- a/vendor/github.com/coreos/etcd/snap/db.go +++ b/vendor/github.com/coreos/etcd/snap/db.go @@ -15,6 +15,7 @@ package snap import ( + "errors" "fmt" "io" "io/ioutil" @@ -24,6 +25,8 @@ import ( "github.com/coreos/etcd/pkg/fileutil" ) +var ErrNoDBSnapshot = errors.New("snap: snapshot file doesn't exist") + // SaveDBFrom saves snapshot of the database from the given reader. It // guarantees the save operation is atomic. func (s *Snapshotter) SaveDBFrom(r io.Reader, id uint64) (int64, error) { @@ -41,7 +44,7 @@ func (s *Snapshotter) SaveDBFrom(r io.Reader, id uint64) (int64, error) { os.Remove(f.Name()) return n, err } - fn := filepath.Join(s.dir, fmt.Sprintf("%016x.snap.db", id)) + fn := s.dbFilePath(id) if fileutil.Exist(fn) { os.Remove(f.Name()) return n, nil @@ -60,15 +63,15 @@ func (s *Snapshotter) SaveDBFrom(r io.Reader, id uint64) (int64, error) { // DBFilePath returns the file path for the snapshot of the database with // given id. If the snapshot does not exist, it returns error. 
func (s *Snapshotter) DBFilePath(id uint64) (string, error) { - fns, err := fileutil.ReadDir(s.dir) - if err != nil { + if _, err := fileutil.ReadDir(s.dir); err != nil { return "", err } - wfn := fmt.Sprintf("%016x.snap.db", id) - for _, fn := range fns { - if fn == wfn { - return filepath.Join(s.dir, fn), nil - } + if fn := s.dbFilePath(id); fileutil.Exist(fn) { + return fn, nil } - return "", fmt.Errorf("snap: snapshot file doesn't exist") + return "", ErrNoDBSnapshot +} + +func (s *Snapshotter) dbFilePath(id uint64) string { + return filepath.Join(s.dir, fmt.Sprintf("%016x.snap.db", id)) } diff --git a/vendor/github.com/coreos/etcd/snap/snappb/snap.pb.go b/vendor/github.com/coreos/etcd/snap/snappb/snap.pb.go index 130e2277c847..05a77ff9d06c 100644 --- a/vendor/github.com/coreos/etcd/snap/snappb/snap.pb.go +++ b/vendor/github.com/coreos/etcd/snap/snappb/snap.pb.go @@ -342,7 +342,7 @@ func init() { proto.RegisterFile("snap.proto", fileDescriptorSnap) } var fileDescriptorSnap = []byte{ // 126 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0xce, 0x4b, 0x2c, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0xce, 0x4b, 0x2c, 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x03, 0xb1, 0x0b, 0x92, 0xa4, 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x42, 0xfa, 0x20, 0x16, 0x44, 0x56, 0xc9, 0x8c, 0x8b, 0x03, 0x24, 0x5f, 0x9c, 0x91, 0x5f, 0x22, 0x24, 0xc6, 0xc5, 0x9c, 0x5c, 0x94, 0x2c, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0xeb, diff --git a/vendor/github.com/coreos/etcd/store/node.go b/vendor/github.com/coreos/etcd/store/node.go index 731327b08ba1..54159553500f 100644 --- a/vendor/github.com/coreos/etcd/store/node.go +++ b/vendor/github.com/coreos/etcd/store/node.go @@ -332,7 +332,6 @@ func (n *node) UpdateTTL(expireTime time.Time) { n.ExpireTime = expireTime // push into ttl heap n.store.ttlKeyHeap.push(n) - return } // Compare function compares node index and value with provided ones. diff --git a/vendor/github.com/coreos/etcd/store/store.go b/vendor/github.com/coreos/etcd/store/store.go index 6c19ad4c9707..edf7f21942b9 100644 --- a/vendor/github.com/coreos/etcd/store/store.go +++ b/vendor/github.com/coreos/etcd/store/store.go @@ -682,6 +682,9 @@ func (s *store) DeleteExpiredKeys(cutoff time.Time) { e := newEvent(Expire, node.Path, s.CurrentIndex, node.CreatedIndex) e.EtcdIndex = s.CurrentIndex e.PrevNode = node.Repr(false, false, s.clock) + if node.IsDir() { + e.Node.Dir = true + } callback := func(path string) { // notify function // notify the watchers with deleted set true diff --git a/vendor/github.com/coreos/etcd/store/watcher_hub.go b/vendor/github.com/coreos/etcd/store/watcher_hub.go index 6dd63f3c5419..13c23e391d91 100644 --- a/vendor/github.com/coreos/etcd/store/watcher_hub.go +++ b/vendor/github.com/coreos/etcd/store/watcher_hub.go @@ -116,7 +116,7 @@ func (wh *watcherHub) watch(key string, recursive, stream bool, index, storeInde } func (wh *watcherHub) add(e *Event) { - e = wh.EventHistory.addEvent(e) + wh.EventHistory.addEvent(e) } // notify function accepts an event and notify to the watchers. diff --git a/vendor/github.com/coreos/etcd/version/version.go b/vendor/github.com/coreos/etcd/version/version.go index 0173d6f11d34..b488499c686d 100644 --- a/vendor/github.com/coreos/etcd/version/version.go +++ b/vendor/github.com/coreos/etcd/version/version.go @@ -26,7 +26,7 @@ import ( var ( // MinClusterVersion is the min cluster version this etcd binary is compatible with. 
MinClusterVersion = "3.0.0" - Version = "3.1.10" + Version = "3.2.13" APIVersion = "unknown" // Git SHA Value will be set during build diff --git a/vendor/github.com/coreos/etcd/wal/encoder.go b/vendor/github.com/coreos/etcd/wal/encoder.go index efe58928cc8c..aac1e197e590 100644 --- a/vendor/github.com/coreos/etcd/wal/encoder.go +++ b/vendor/github.com/coreos/etcd/wal/encoder.go @@ -52,7 +52,7 @@ func newEncoder(w io.Writer, prevCrc uint32, pageOffset int) *encoder { // newFileEncoder creates a new encoder with current file offset for the page writer. func newFileEncoder(f *os.File, prevCrc uint32) (*encoder, error) { - offset, err := f.Seek(0, os.SEEK_CUR) + offset, err := f.Seek(0, io.SeekCurrent) if err != nil { return nil, err } diff --git a/vendor/github.com/coreos/etcd/wal/repair.go b/vendor/github.com/coreos/etcd/wal/repair.go index ffb141616826..091036b57b9a 100644 --- a/vendor/github.com/coreos/etcd/wal/repair.go +++ b/vendor/github.com/coreos/etcd/wal/repair.go @@ -62,7 +62,7 @@ func Repair(dirpath string) bool { } defer bf.Close() - if _, err = f.Seek(0, os.SEEK_SET); err != nil { + if _, err = f.Seek(0, io.SeekStart); err != nil { plog.Errorf("could not repair %v, failed to read file", f.Name()) return false } diff --git a/vendor/github.com/coreos/etcd/wal/wal.go b/vendor/github.com/coreos/etcd/wal/wal.go index b65f64483042..2cac25c1c904 100644 --- a/vendor/github.com/coreos/etcd/wal/wal.go +++ b/vendor/github.com/coreos/etcd/wal/wal.go @@ -112,7 +112,7 @@ func Create(dirpath string, metadata []byte) (*WAL, error) { if err != nil { return nil, err } - if _, err = f.Seek(0, os.SEEK_END); err != nil { + if _, err = f.Seek(0, io.SeekEnd); err != nil { return nil, err } if err = fileutil.Preallocate(f.File, SegmentSizeBytes, true); err != nil { @@ -322,7 +322,7 @@ func (w *WAL) ReadAll() (metadata []byte, state raftpb.HardState, ents []raftpb. // not all, will cause CRC errors on WAL open. Since the records // were never fully synced to disk in the first place, it's safe // to zero them out to avoid any CRC errors from new writes. - if _, err = w.tail().Seek(w.decoder.lastOffset(), os.SEEK_SET); err != nil { + if _, err = w.tail().Seek(w.decoder.lastOffset(), io.SeekStart); err != nil { return nil, state, nil, err } if err = fileutil.ZeroToEnd(w.tail().File); err != nil { @@ -361,7 +361,7 @@ func (w *WAL) ReadAll() (metadata []byte, state raftpb.HardState, ents []raftpb. // Then cut atomically rename temp wal file to a wal file. 
func (w *WAL) cut() error { // close old wal file; truncate to avoid wasting space if an early cut - off, serr := w.tail().Seek(0, os.SEEK_CUR) + off, serr := w.tail().Seek(0, io.SeekCurrent) if serr != nil { return serr } @@ -401,7 +401,7 @@ func (w *WAL) cut() error { return err } - off, err = w.tail().Seek(0, os.SEEK_CUR) + off, err = w.tail().Seek(0, io.SeekCurrent) if err != nil { return err } @@ -418,7 +418,7 @@ func (w *WAL) cut() error { if newTail, err = fileutil.LockFile(fpath, os.O_WRONLY, fileutil.PrivateFileMode); err != nil { return err } - if _, err = newTail.Seek(off, os.SEEK_SET); err != nil { + if _, err = newTail.Seek(off, io.SeekStart); err != nil { return err } @@ -552,7 +552,7 @@ func (w *WAL) Save(st raftpb.HardState, ents []raftpb.Entry) error { return nil } - mustSync := mustSync(st, w.state, len(ents)) + mustSync := raft.MustSync(st, w.state, len(ents)) // TODO(xiangli): no more reference operator for i := range ents { @@ -564,7 +564,7 @@ func (w *WAL) Save(st raftpb.HardState, ents []raftpb.Entry) error { return err } - curOff, err := w.tail().Seek(0, os.SEEK_CUR) + curOff, err := w.tail().Seek(0, io.SeekCurrent) if err != nil { return err } @@ -618,15 +618,6 @@ func (w *WAL) seq() uint64 { return seq } -func mustSync(st, prevst raftpb.HardState, entsnum int) bool { - // Persistent state on all servers: - // (Updated on stable storage before responding to RPCs) - // currentTerm - // votedFor - // log entries[] - return entsnum != 0 || st.Vote != prevst.Vote || st.Term != prevst.Term -} - func closeAll(rcs ...io.ReadCloser) error { for _, f := range rcs { if err := f.Close(); err != nil { diff --git a/vendor/github.com/coreos/etcd/wal/walpb/record.pb.go b/vendor/github.com/coreos/etcd/wal/walpb/record.pb.go index e1a77d5e51a0..664fae1305bf 100644 --- a/vendor/github.com/coreos/etcd/wal/walpb/record.pb.go +++ b/vendor/github.com/coreos/etcd/wal/walpb/record.pb.go @@ -506,7 +506,7 @@ func init() { proto.RegisterFile("record.proto", fileDescriptorRecord) } var fileDescriptorRecord = []byte{ // 186 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x4a, 0x4d, 0xce, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x4a, 0x4d, 0xce, 0x2f, 0x4a, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2d, 0x4f, 0xcc, 0x29, 0x48, 0x92, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x8b, 0xe8, 0x83, 0x58, 0x10, 0x49, 0x25, 0x3f, 0x2e, 0xb6, 0x20, 0xb0, 0x62, 0x21, 0x09, 0x2e, 0x96, 0x92, 0xca, 0x82, 0x54, 0x09, 0x46, 0x05, 0x46, 0x0d, diff --git a/vendor/github.com/coreos/go-oidc/.gitignore b/vendor/github.com/coreos/go-oidc/.gitignore new file mode 100644 index 000000000000..c96f2f47bc6d --- /dev/null +++ b/vendor/github.com/coreos/go-oidc/.gitignore @@ -0,0 +1,2 @@ +/bin +/gopath diff --git a/vendor/github.com/coreos/go-oidc/.travis.yml b/vendor/github.com/coreos/go-oidc/.travis.yml new file mode 100644 index 000000000000..f2f3c9c81ffd --- /dev/null +++ b/vendor/github.com/coreos/go-oidc/.travis.yml @@ -0,0 +1,16 @@ +language: go + +go: + - 1.7.5 + - 1.8 + +install: + - go get -v -t github.com/coreos/go-oidc/... 
+ - go get golang.org/x/tools/cmd/cover + - go get github.com/golang/lint/golint + +script: + - ./test + +notifications: + email: false diff --git a/vendor/github.com/coreos/go-oidc/CONTRIBUTING.md b/vendor/github.com/coreos/go-oidc/CONTRIBUTING.md new file mode 100644 index 000000000000..6662073a848e --- /dev/null +++ b/vendor/github.com/coreos/go-oidc/CONTRIBUTING.md @@ -0,0 +1,71 @@ +# How to Contribute + +CoreOS projects are [Apache 2.0 licensed](LICENSE) and accept contributions via +GitHub pull requests. This document outlines some of the conventions on +development workflow, commit message formatting, contact points and other +resources to make it easier to get your contribution accepted. + +# Certificate of Origin + +By contributing to this project you agree to the Developer Certificate of +Origin (DCO). This document was created by the Linux Kernel community and is a +simple statement that you, as a contributor, have the legal right to make the +contribution. See the [DCO](DCO) file for details. + +# Email and Chat + +The project currently uses the general CoreOS email list and IRC channel: +- Email: [coreos-dev](https://groups.google.com/forum/#!forum/coreos-dev) +- IRC: #[coreos](irc://irc.freenode.org:6667/#coreos) IRC channel on freenode.org + +Please avoid emailing maintainers found in the MAINTAINERS file directly. They +are very busy and read the mailing lists. + +## Getting Started + +- Fork the repository on GitHub +- Read the [README](README.md) for build and test instructions +- Play with the project, submit bugs, submit patches! + +## Contribution Flow + +This is a rough outline of what a contributor's workflow looks like: + +- Create a topic branch from where you want to base your work (usually master). +- Make commits of logical units. +- Make sure your commit messages are in the proper format (see below). +- Push your changes to a topic branch in your fork of the repository. +- Make sure the tests pass, and add any new tests as appropriate. +- Submit a pull request to the original repository. + +Thanks for your contributions! + +### Format of the Commit Message + +We follow a rough convention for commit messages that is designed to answer two +questions: what changed and why. The subject line should feature the what and +the body of the commit should describe the why. + +``` +scripts: add the test-cluster command + +this uses tmux to setup a test cluster that you can easily kill and +start for debugging. + +Fixes #38 +``` + +The format can be described more formally as follows: + +``` +<subsystem>: <what changed> +<BLANK LINE> +<why this change was made> +<BLANK LINE> +<footer>