From f5bcbff0c49060b50f4f70c7ab1c4e1fe6e6b36a Mon Sep 17 00:00:00 2001 From: Vladimir Nachev Date: Tue, 12 Jan 2021 13:36:31 +0200 Subject: [PATCH 1/2] Vendor gardener/gardener@570ae178874b --- go.mod | 2 +- go.sum | 28 +- pkg/webhook/controlplane/ensurer.go | 23 +- pkg/webhook/controlplane/ensurer_test.go | 11 +- pkg/webhook/controlplaneexposure/ensurer.go | 9 +- .../controlplaneexposure/ensurer_test.go | 8 +- .../genericactuator/actuator_reconcile.go | 9 - .../extensions/pkg/terraformer/config.go | 64 +- .../extensions/pkg/terraformer/raw_state.go | 12 +- .../extensions/pkg/terraformer/state.go | 18 +- .../extensions/pkg/terraformer/terraformer.go | 25 + .../extensions/pkg/terraformer/types.go | 10 +- .../gardener/extensions/pkg/util/shoot.go | 2 +- .../extensions/pkg/webhook/context/context.go | 63 ++ .../controlplane/genericmutator/mutator.go | 110 +-- .../genericmutator/noopensurer.go | 25 +- .../extensions/pkg/webhook/registration.go | 18 + .../gardener/gardener/hack/hook-me.sh | 8 +- .../gardener/gardener/hack/test-cover.sh | 2 +- .../gardener/pkg/api/extensions/utils.go | 14 +- .../gardener/pkg/apis/core/types_project.go | 2 +- .../gardener/pkg/apis/core/types_shoot.go | 2 +- .../pkg/apis/core/v1alpha1/generated.proto | 2 +- .../pkg/apis/core/v1alpha1/types_shoot.go | 2 +- .../pkg/apis/core/v1beta1/generated.proto | 2 +- .../pkg/apis/core/v1beta1/helper/errors.go | 4 +- .../pkg/apis/core/v1beta1/types_shoot.go | 2 +- .../pkg/apis/core/validation/project.go | 10 +- .../gardener/pkg/features/features.go | 5 + .../pkg/gardenlet/features/features.go | 1 + .../gardener/extensions/terraformer/mocks.go | 27 +- .../gardener/pkg/operation/botanist/addons.go | 12 +- .../pkg/operation/botanist/botanist.go | 9 +- .../operation/botanist/clusterautoscaler.go | 2 +- .../pkg/operation/botanist/controlplane.go | 13 +- .../clusterautoscaler/cluster_autoscaler.go | 3 +- .../botanist/controlplane/etcd/etcd.go | 5 +- .../botanist/controlplane/etcd/waiter.go | 2 +- 
.../kube_controller_manager.go | 3 +- .../kubescheduler/kube_scheduler.go | 3 +- .../pkg/operation/botanist/extension.go | 140 +--- .../extensions/controlplane/controlplane.go | 104 +-- .../extensions/extension/extension.go | 250 +++++++ .../infrastructure/infrastructure.go | 86 ++- .../botanist/extensions/worker/worker.go | 355 ++++++++++ .../pkg/operation/botanist/health_check.go | 7 +- .../pkg/operation/botanist/infrastructure.go | 34 +- .../pkg/operation/botanist/migration.go | 38 +- .../pkg/operation/botanist/network.go | 2 +- .../pkg/operation/botanist/secrets.go | 17 +- .../seedadmission/seedadmission.go | 39 +- .../gardener/pkg/operation/botanist/waiter.go | 52 +- .../gardener/pkg/operation/botanist/worker.go | 229 +------ .../gardener/pkg/operation/common/types.go | 30 +- .../gardener/pkg/operation/common/utils.go | 41 +- .../gardener/pkg/operation/garden/garden.go | 14 +- .../gardener/pkg/operation/operation.go | 6 +- .../scheduler/configurator/configurator.go} | 26 +- .../seed/scheduler/configurator/doc.go | 17 + .../seed/scheduler/gardener_kube_scheduler.go | 421 ++++++++++++ .../gardener_kube_scheduler_bootstrap.go | 159 +++++ .../pkg/operation/seed/scheduler/types.go | 39 ++ .../seed/scheduler/v18/configurator.go | 79 +++ .../pkg/operation/seed/scheduler/v18/doc.go | 18 + .../seed/scheduler/v19/configurator.go | 79 +++ .../pkg/operation/seed/scheduler/v19/doc.go | 18 + .../seed/scheduler/v20/configurator.go | 79 +++ .../pkg/operation/seed/scheduler/v20/doc.go | 18 + .../gardener/pkg/operation/seed/seed.go | 33 +- .../gardener/pkg/operation/shoot/shoot.go | 24 +- .../gardener/pkg/operation/shoot/types.go | 27 +- .../gardener/pkg/seedadmission/constants.go | 24 + .../pkg/seedadmission/extension_crds.go | 129 ++++ .../gardener/pkg/seedadmission/utils.go | 63 ++ .../pkg/utils/kubernetes/kubernetes.go | 180 ++++- .../gardener/pkg/utils/kubernetes/sorter.go | 79 +++ .../gardener/pkg/utils/retry/retry.go | 33 +- .../gardener/pkg/utils/secrets/basic_auth.go | 
27 +- .../pkg/utils/secrets/certificates.go | 12 +- .../gardener/pkg/utils/secrets/secrets.go | 7 +- .../gardener/gardener/test/framework/utils.go | 14 +- .../kube-scheduler/v18/v1alpha2/doc.go | 21 + .../kube-scheduler/v18/v1alpha2/register.go | 43 ++ .../kube-scheduler/v18/v1alpha2/types.go | 204 ++++++ .../v18/v1alpha2/zz_generated.deepcopy.go | 292 ++++++++ .../kube-scheduler/v19/v1beta1/doc.go | 21 + .../kube-scheduler/v19/v1beta1/register.go | 52 ++ .../kube-scheduler/v19/v1beta1/types.go | 306 +++++++++ .../v19/v1beta1/types_pluginargs.go | 172 +++++ .../v19/v1beta1/zz_generated.deepcopy.go | 627 ++++++++++++++++++ .../kube-scheduler/v20/v1beta1/doc.go | 21 + .../kube-scheduler/v20/v1beta1/register.go | 52 ++ .../kube-scheduler/v20/v1beta1/types.go | 306 +++++++++ .../v20/v1beta1/types_pluginargs.go | 172 +++++ .../v20/v1beta1/zz_generated.deepcopy.go | 627 ++++++++++++++++++ vendor/golang.org/x/crypto/bcrypt/base64.go | 35 - vendor/golang.org/x/crypto/bcrypt/bcrypt.go | 295 -------- vendor/google.golang.org/grpc/.travis.yml | 2 +- vendor/google.golang.org/grpc/Makefile | 3 + vendor/google.golang.org/grpc/README.md | 16 + .../grpc/balancer/balancer.go | 5 - .../grpc/balancer/base/balancer.go | 87 ++- .../grpc/balancer_conn_wrappers.go | 4 +- vendor/google.golang.org/grpc/clientconn.go | 143 ++-- .../grpc/credentials/credentials.go | 85 ++- .../google.golang.org/grpc/credentials/tls.go | 9 +- vendor/google.golang.org/grpc/dialoptions.go | 25 +- vendor/google.golang.org/grpc/go.mod | 4 +- vendor/google.golang.org/grpc/go.sum | 15 +- .../google.golang.org/grpc/grpclog/grpclog.go | 42 +- .../google.golang.org/grpc/grpclog/logger.go | 4 +- .../grpc/grpclog/loggerv2.go | 21 +- .../grpc/internal/channelz/funcs.go | 22 +- .../grpc/internal/channelz/logging.go | 100 +++ .../grpc/internal/envconfig/envconfig.go | 7 +- .../grpc/internal/grpclog/grpclog.go | 118 ++++ .../grpc/internal/grpclog/prefixLogger.go | 63 ++ .../grpc/internal/grpcutil/target.go | 55 ++ 
.../grpc/internal/internal.go | 2 - .../internal/resolver/dns/dns_resolver.go | 85 ++- .../grpc/internal/transport/handler_server.go | 2 +- .../grpc/internal/transport/http2_client.go | 12 +- .../grpc/resolver/resolver.go | 10 - .../grpc/resolver_conn_wrapper.go | 69 +- vendor/google.golang.org/grpc/rpc_util.go | 21 +- vendor/google.golang.org/grpc/server.go | 114 +++- .../google.golang.org/grpc/service_config.go | 4 +- vendor/google.golang.org/grpc/stream.go | 5 +- vendor/google.golang.org/grpc/trace.go | 3 - vendor/google.golang.org/grpc/version.go | 2 +- vendor/google.golang.org/grpc/vet.sh | 5 +- vendor/k8s.io/kube-scheduler/LICENSE | 202 ++++++ vendor/k8s.io/kube-scheduler/config/v1/doc.go | 21 + .../kube-scheduler/config/v1/register.go | 45 ++ .../k8s.io/kube-scheduler/config/v1/types.go | 242 +++++++ .../config/v1/zz_generated.deepcopy.go | 375 +++++++++++ vendor/modules.txt | 21 +- 137 files changed, 7448 insertions(+), 1489 deletions(-) create mode 100644 vendor/github.com/gardener/gardener/extensions/pkg/webhook/context/context.go create mode 100644 vendor/github.com/gardener/gardener/pkg/operation/botanist/extensions/extension/extension.go create mode 100644 vendor/github.com/gardener/gardener/pkg/operation/botanist/extensions/worker/worker.go rename vendor/github.com/gardener/gardener/pkg/{utils/kubernetes/event.go => operation/seed/scheduler/configurator/configurator.go} (58%) create mode 100644 vendor/github.com/gardener/gardener/pkg/operation/seed/scheduler/configurator/doc.go create mode 100644 vendor/github.com/gardener/gardener/pkg/operation/seed/scheduler/gardener_kube_scheduler.go create mode 100644 vendor/github.com/gardener/gardener/pkg/operation/seed/scheduler/gardener_kube_scheduler_bootstrap.go create mode 100644 vendor/github.com/gardener/gardener/pkg/operation/seed/scheduler/types.go create mode 100644 vendor/github.com/gardener/gardener/pkg/operation/seed/scheduler/v18/configurator.go create mode 100644 
vendor/github.com/gardener/gardener/pkg/operation/seed/scheduler/v18/doc.go create mode 100644 vendor/github.com/gardener/gardener/pkg/operation/seed/scheduler/v19/configurator.go create mode 100644 vendor/github.com/gardener/gardener/pkg/operation/seed/scheduler/v19/doc.go create mode 100644 vendor/github.com/gardener/gardener/pkg/operation/seed/scheduler/v20/configurator.go create mode 100644 vendor/github.com/gardener/gardener/pkg/operation/seed/scheduler/v20/doc.go create mode 100644 vendor/github.com/gardener/gardener/pkg/seedadmission/constants.go create mode 100644 vendor/github.com/gardener/gardener/pkg/seedadmission/extension_crds.go create mode 100644 vendor/github.com/gardener/gardener/pkg/seedadmission/utils.go create mode 100644 vendor/github.com/gardener/gardener/pkg/utils/kubernetes/sorter.go create mode 100644 vendor/github.com/gardener/gardener/third_party/kube-scheduler/v18/v1alpha2/doc.go create mode 100644 vendor/github.com/gardener/gardener/third_party/kube-scheduler/v18/v1alpha2/register.go create mode 100644 vendor/github.com/gardener/gardener/third_party/kube-scheduler/v18/v1alpha2/types.go create mode 100644 vendor/github.com/gardener/gardener/third_party/kube-scheduler/v18/v1alpha2/zz_generated.deepcopy.go create mode 100644 vendor/github.com/gardener/gardener/third_party/kube-scheduler/v19/v1beta1/doc.go create mode 100644 vendor/github.com/gardener/gardener/third_party/kube-scheduler/v19/v1beta1/register.go create mode 100644 vendor/github.com/gardener/gardener/third_party/kube-scheduler/v19/v1beta1/types.go create mode 100644 vendor/github.com/gardener/gardener/third_party/kube-scheduler/v19/v1beta1/types_pluginargs.go create mode 100644 vendor/github.com/gardener/gardener/third_party/kube-scheduler/v19/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/github.com/gardener/gardener/third_party/kube-scheduler/v20/v1beta1/doc.go create mode 100644 
vendor/github.com/gardener/gardener/third_party/kube-scheduler/v20/v1beta1/register.go create mode 100644 vendor/github.com/gardener/gardener/third_party/kube-scheduler/v20/v1beta1/types.go create mode 100644 vendor/github.com/gardener/gardener/third_party/kube-scheduler/v20/v1beta1/types_pluginargs.go create mode 100644 vendor/github.com/gardener/gardener/third_party/kube-scheduler/v20/v1beta1/zz_generated.deepcopy.go delete mode 100644 vendor/golang.org/x/crypto/bcrypt/base64.go delete mode 100644 vendor/golang.org/x/crypto/bcrypt/bcrypt.go create mode 100644 vendor/google.golang.org/grpc/internal/channelz/logging.go create mode 100644 vendor/google.golang.org/grpc/internal/grpclog/grpclog.go create mode 100644 vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go create mode 100644 vendor/google.golang.org/grpc/internal/grpcutil/target.go create mode 100644 vendor/k8s.io/kube-scheduler/LICENSE create mode 100644 vendor/k8s.io/kube-scheduler/config/v1/doc.go create mode 100644 vendor/k8s.io/kube-scheduler/config/v1/register.go create mode 100644 vendor/k8s.io/kube-scheduler/config/v1/types.go create mode 100644 vendor/k8s.io/kube-scheduler/config/v1/zz_generated.deepcopy.go diff --git a/go.mod b/go.mod index 08b95aa6d..113401c0b 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( github.com/coreos/go-systemd/v22 v22.1.0 github.com/frankban/quicktest v1.9.0 // indirect github.com/gardener/etcd-druid v0.3.0 - github.com/gardener/gardener v1.15.0 + github.com/gardener/gardener v1.15.1-0.20210112065447-570ae178874b github.com/gardener/machine-controller-manager v0.36.0 github.com/go-logr/logr v0.1.0 github.com/gobuffalo/packr/v2 v2.8.1 diff --git a/go.sum b/go.sum index af9c58e27..98f727211 100644 --- a/go.sum +++ b/go.sum @@ -93,9 +93,13 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod 
h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cloudflare/cloudflare-go v0.11.4/go.mod h1:ZB+hp7VycxPLpp0aiozQQezat46npDXhzHi1DVtRCn4= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354 h1:9kRtNpqLHbZVO/NNxhHp2ymxFxsHOe3x2efJGn//Tas= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= @@ -149,7 +153,12 @@ github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.6+incompatible h1:tfrHha8zJ01ywiOEC1miGY8st1/igzWB8OmvPgoYX7w= github.com/emicklei/go-restful v2.9.6+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane 
v0.9.7-0.20200730005029-803dd64f0468 h1:d6NP0AddmfOtmgiEBSyNU7Q8FfCXUjISIyXWU76oaAc= +github.com/envoyproxy/go-control-plane v0.9.7-0.20200730005029-803dd64f0468/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v4.0.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M= @@ -180,8 +189,8 @@ github.com/gardener/gardener v1.1.2/go.mod h1:CP9I0tCDVXTLPkJv/jUtXVUh948kSNKEGU github.com/gardener/gardener v1.3.1/go.mod h1:936P5tQbg6ViiW8BVC9ELM95sFrk4DgobKrxMNtn/LU= github.com/gardener/gardener v1.4.1-0.20200519155656-a8ccc6cc779a/go.mod h1:t9oESM37bAMIuezi9I0H0I8+++8jy8BUPitcf4ERRXY= github.com/gardener/gardener v1.11.3/go.mod h1:5DzqfOm+G8UftKu5zUbYJ+9Cnfd4XrvRNDabkM9AIp4= -github.com/gardener/gardener v1.15.0 h1:33YMkaKPLWfUtGJ0FTCh6D3RZSr5I0RlCkwj/oBRWww= -github.com/gardener/gardener v1.15.0/go.mod h1:B6mu7i9OusC+k3DFGuiwt99xU7G4IoiwphBRsqr0lC4= +github.com/gardener/gardener v1.15.1-0.20210112065447-570ae178874b h1:zbZMGEc/kNTQIW2Bngow90xaPGdxS1egiCAiyiaFbnE= +github.com/gardener/gardener v1.15.1-0.20210112065447-570ae178874b/go.mod h1:576V45plCC5GkKWeGK4CAuZ5/SaVr6lRHZRTSHOiQ+E= github.com/gardener/gardener-resource-manager v0.10.0/go.mod h1:0pKTHOhvU91eQB0EYr/6Ymd7lXc/5Hi8P8tF/gpV0VQ= github.com/gardener/gardener-resource-manager v0.13.1/go.mod h1:0No/XttYRUwDn5lSppq9EqlKdo/XJQ44aCZz5BVu3Vw= github.com/gardener/gardener-resource-manager v0.18.0 h1:bNB0yKhSqe8DnsvIp3xZr9nsFB4fm+AUAqj1EoIvWU8= @@ -286,6 +295,7 @@ github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5x github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod 
h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A= @@ -311,6 +321,8 @@ github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -911,8 +923,12 @@ google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ij google.golang.org/grpc v1.23.0 h1:AzbTB6ux+okLTzP8Ru1Xs41C303zdcfEht7MQnYJt5A= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0 h1:2dTRdpdFEEhJYQD8EMLB61nnrzSCTbG38PhqdhvOltg= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= 
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.1 h1:C1QC6KzgSiLyBabDi87BbjaGreoRgGUF5nOyvfrAZ1k= +google.golang.org/grpc v1.28.1/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -960,6 +976,12 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +istio.io/api v0.0.0-20201123152548-197f11e4ea09 h1:+3Q/a15sTRDAYN66nM+bqCDF1MsynIEpWm9CKPdPOPg= +istio.io/api v0.0.0-20201123152548-197f11e4ea09/go.mod h1:88HN3o1fSD1jo+Z1WTLlJfMm9biopur6Ct9BFKjiB64= +istio.io/client-go v1.8.1 h1:gIeAQBgXkyUuvQ5LUtvxIY5TdFuaCLZqXgxF3bigKww= +istio.io/client-go v1.8.1/go.mod h1:Qymv71lwIqjDTkaE2NqBYLL+Bl5KsCfzEDhntXypHYY= +istio.io/gogo-genproto v0.0.0-20190930162913-45029607206a h1:w7zILua2dnYo9CxImhpNW4NE/8ZxEoc/wfBfHrhUhrE= +istio.io/gogo-genproto v0.0.0-20190930162913-45029607206a/go.mod h1:OzpAts7jljZceG4Vqi5/zXy/pOg1b209T3jb7Nv5wIs= k8s.io/api v0.18.10 h1:M0/vqfuBAIIS7jsOOcosT0niiotZGqw6/zHTFpyi8iQ= k8s.io/api v0.18.10/go.mod h1:xWtwPX1v47j5RTncmlMFGCx8b0avh+nP8OgZZ9hjo3M= k8s.io/apiextensions-apiserver v0.18.10 h1:NDp1wS1mnN6ync6onO0oVWMymTYK/ZoyFj3D0YHVzAk= @@ -1002,6 +1024,8 @@ k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKf k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod 
h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= k8s.io/kube-openapi v0.0.0-20200410145947-bcb3869e6f29 h1:NeQXVJ2XFSkRoPzRo8AId01ZER+j8oV4SZADT4iBOXQ= k8s.io/kube-openapi v0.0.0-20200410145947-bcb3869e6f29/go.mod h1:F+5wygcW0wmRTnM3cOgIqGivxkwSWIWT5YdsDbeAOaU= +k8s.io/kube-scheduler v0.18.10 h1:1pAxXAiERNWKN70GslSKt2acUN/ACz/XcWseh+19Q8c= +k8s.io/kube-scheduler v0.18.10/go.mod h1:38J1X83Vc+BOOgvY9yABbeeZBzgJxfBDAazrAMOCZq0= k8s.io/kubelet v0.16.8/go.mod h1:mzDpnryQg2dlB6V3/WAgb1baIamiICtWpXMFrPOFh6I= k8s.io/kubelet v0.18.8/go.mod h1:6z1jHCk0NPE6WshFStfqcgQ1bnD3tetcPmhC2915aio= k8s.io/kubelet v0.18.10 h1:FJ1WdE/+fpXOX19TDEvWP3iG9bwWCiIv/rZvVf6iIek= diff --git a/pkg/webhook/controlplane/ensurer.go b/pkg/webhook/controlplane/ensurer.go index e5f7e3a04..8830d1b25 100644 --- a/pkg/webhook/controlplane/ensurer.go +++ b/pkg/webhook/controlplane/ensurer.go @@ -26,6 +26,7 @@ import ( "github.com/coreos/go-systemd/v22/unit" "github.com/gardener/gardener/extensions/pkg/controller/csimigration" extensionswebhook "github.com/gardener/gardener/extensions/pkg/webhook" + gcontext "github.com/gardener/gardener/extensions/pkg/webhook/context" "github.com/gardener/gardener/extensions/pkg/webhook/controlplane" "github.com/gardener/gardener/extensions/pkg/webhook/controlplane/genericmutator" v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants" @@ -60,11 +61,11 @@ func (e *ensurer) InjectClient(client client.Client) error { } // EnsureKubeAPIServerDeployment ensures that the kube-apiserver deployment conforms to the provider requirements. 
-func (e *ensurer) EnsureKubeAPIServerDeployment(ctx context.Context, ectx genericmutator.EnsurerContext, new, _ *appsv1.Deployment) error { +func (e *ensurer) EnsureKubeAPIServerDeployment(ctx context.Context, gctx gcontext.GardenContext, new, _ *appsv1.Deployment) error { template := &new.Spec.Template ps := &template.Spec - cluster, err := ectx.GetCluster(ctx) + cluster, err := gctx.GetCluster(ctx) if err != nil { return err } @@ -85,11 +86,11 @@ func (e *ensurer) EnsureKubeAPIServerDeployment(ctx context.Context, ectx generi } // EnsureKubeControllerManagerDeployment ensures that the kube-controller-manager deployment conforms to the provider requirements. -func (e *ensurer) EnsureKubeControllerManagerDeployment(ctx context.Context, ectx genericmutator.EnsurerContext, new, _ *appsv1.Deployment) error { +func (e *ensurer) EnsureKubeControllerManagerDeployment(ctx context.Context, gctx gcontext.GardenContext, new, _ *appsv1.Deployment) error { template := &new.Spec.Template ps := &template.Spec - cluster, err := ectx.GetCluster(ctx) + cluster, err := gctx.GetCluster(ctx) if err != nil { return err } @@ -111,11 +112,11 @@ func (e *ensurer) EnsureKubeControllerManagerDeployment(ctx context.Context, ect } // EnsureKubeSchedulerDeployment ensures that the kube-scheduler deployment conforms to the provider requirements. -func (e *ensurer) EnsureKubeSchedulerDeployment(ctx context.Context, ectx genericmutator.EnsurerContext, new, _ *appsv1.Deployment) error { +func (e *ensurer) EnsureKubeSchedulerDeployment(ctx context.Context, gctx gcontext.GardenContext, new, _ *appsv1.Deployment) error { template := &new.Spec.Template ps := &template.Spec - cluster, err := ectx.GetCluster(ctx) + cluster, err := gctx.GetCluster(ctx) if err != nil { return err } @@ -375,8 +376,8 @@ func (e *ensurer) ensureChecksumAnnotations(ctx context.Context, template *corev } // EnsureKubeletServiceUnitOptions ensures that the kubelet.service unit options conform to the provider requirements. 
-func (e *ensurer) EnsureKubeletServiceUnitOptions(ctx context.Context, ectx genericmutator.EnsurerContext, new, _ []*unit.UnitOption) ([]*unit.UnitOption, error) { - cluster, err := ectx.GetCluster(ctx) +func (e *ensurer) EnsureKubeletServiceUnitOptions(ctx context.Context, gctx gcontext.GardenContext, new, _ []*unit.UnitOption) ([]*unit.UnitOption, error) { + cluster, err := gctx.GetCluster(ctx) if err != nil { return nil, err } @@ -412,8 +413,8 @@ func ensureKubeletCommandLineArgs(command []string, csiEnabled bool) []string { } // EnsureKubeletConfiguration ensures that the kubelet configuration conforms to the provider requirements. -func (e *ensurer) EnsureKubeletConfiguration(ctx context.Context, ectx genericmutator.EnsurerContext, new, _ *kubeletconfigv1beta1.KubeletConfiguration) error { - cluster, err := ectx.GetCluster(ctx) +func (e *ensurer) EnsureKubeletConfiguration(ctx context.Context, gctx gcontext.GardenContext, new, _ *kubeletconfigv1beta1.KubeletConfiguration) error { + cluster, err := gctx.GetCluster(ctx) if err != nil { return err } @@ -440,7 +441,7 @@ func (e *ensurer) EnsureKubeletConfiguration(ctx context.Context, ectx genericmu var regexFindProperty = regexp.MustCompile("net.ipv4.ip_forward[[:space:]]*=[[:space:]]*([[:alnum:]]+)") // EnsureKubernetesGeneralConfiguration ensures that the kubernetes general configuration conforms to the provider requirements. 
-func (e *ensurer) EnsureKubernetesGeneralConfiguration(ctx context.Context, _ genericmutator.EnsurerContext, new, _ *string) error { +func (e *ensurer) EnsureKubernetesGeneralConfiguration(ctx context.Context, _ gcontext.GardenContext, new, _ *string) error { // If the needed property exists, ensure the correct value if regexFindProperty.MatchString(*new) { res := regexFindProperty.ReplaceAll([]byte(*new), []byte("net.ipv4.ip_forward = 1")) diff --git a/pkg/webhook/controlplane/ensurer_test.go b/pkg/webhook/controlplane/ensurer_test.go index 4c4030ae8..15000584b 100644 --- a/pkg/webhook/controlplane/ensurer_test.go +++ b/pkg/webhook/controlplane/ensurer_test.go @@ -24,6 +24,7 @@ import ( extensionscontroller "github.com/gardener/gardener/extensions/pkg/controller" "github.com/gardener/gardener/extensions/pkg/controller/csimigration" extensionswebhook "github.com/gardener/gardener/extensions/pkg/webhook" + gcontext "github.com/gardener/gardener/extensions/pkg/webhook/context" "github.com/gardener/gardener/extensions/pkg/webhook/controlplane/genericmutator" "github.com/gardener/gardener/extensions/pkg/webhook/controlplane/test" gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1" @@ -55,8 +56,8 @@ var _ = Describe("Ensurer", func() { ctrl *gomock.Controller ctx = context.TODO() - dummyContext = genericmutator.NewEnsurerContext(nil, nil) - eContextK8s116 = genericmutator.NewInternalEnsurerContext( + dummyContext = gcontext.NewGardenContext(nil, nil) + eContextK8s116 = gcontext.NewInternalGardenContext( &extensionscontroller.Cluster{ Shoot: &gardencorev1beta1.Shoot{ Spec: gardencorev1beta1.ShootSpec{ @@ -67,7 +68,7 @@ var _ = Describe("Ensurer", func() { }, }, ) - eContextK8s117 = genericmutator.NewInternalEnsurerContext( + eContextK8s117 = gcontext.NewInternalGardenContext( &extensionscontroller.Cluster{ Shoot: &gardencorev1beta1.Shoot{ Spec: gardencorev1beta1.ShootSpec{ @@ -78,7 +79,7 @@ var _ = Describe("Ensurer", func() { }, }, ) - eContextK8s118 
= genericmutator.NewInternalEnsurerContext( + eContextK8s118 = gcontext.NewInternalGardenContext( &extensionscontroller.Cluster{ Shoot: &gardencorev1beta1.Shoot{ Spec: gardencorev1beta1.ShootSpec{ @@ -89,7 +90,7 @@ var _ = Describe("Ensurer", func() { }, }, ) - eContextK8s118WithCSIAnnotation = genericmutator.NewInternalEnsurerContext( + eContextK8s118WithCSIAnnotation = gcontext.NewInternalGardenContext( &extensionscontroller.Cluster{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ diff --git a/pkg/webhook/controlplaneexposure/ensurer.go b/pkg/webhook/controlplaneexposure/ensurer.go index 68caa24f3..bd84a836f 100644 --- a/pkg/webhook/controlplaneexposure/ensurer.go +++ b/pkg/webhook/controlplaneexposure/ensurer.go @@ -17,12 +17,13 @@ package controlplaneexposure import ( "context" - druidv1alpha1 "github.com/gardener/etcd-druid/api/v1alpha1" "github.com/gardener/gardener-extension-provider-gcp/pkg/apis/config" + + druidv1alpha1 "github.com/gardener/etcd-druid/api/v1alpha1" "github.com/gardener/gardener/extensions/pkg/controller" extensionswebhook "github.com/gardener/gardener/extensions/pkg/webhook" + gcontext "github.com/gardener/gardener/extensions/pkg/webhook/context" "github.com/gardener/gardener/extensions/pkg/webhook/controlplane/genericmutator" - v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants" v1beta1helper "github.com/gardener/gardener/pkg/apis/core/v1beta1/helper" kutil "github.com/gardener/gardener/pkg/utils/kubernetes" @@ -55,7 +56,7 @@ func (e *ensurer) InjectClient(client client.Client) error { } // EnsureKubeAPIServerDeployment ensures that the kube-apiserver deployment conforms to the provider requirements. 
-func (e *ensurer) EnsureKubeAPIServerDeployment(ctx context.Context, ectx genericmutator.EnsurerContext, new, old *appsv1.Deployment) error { +func (e *ensurer) EnsureKubeAPIServerDeployment(ctx context.Context, gctx gcontext.GardenContext, new, old *appsv1.Deployment) error { if v1beta1helper.IsAPIServerExposureManaged(new) { return nil } @@ -82,7 +83,7 @@ func (e *ensurer) EnsureKubeAPIServerDeployment(ctx context.Context, ectx generi } // EnsureETCD ensures that the etcd conform to the provider requirements. -func (e *ensurer) EnsureETCD(ctx context.Context, ectx genericmutator.EnsurerContext, new, old *druidv1alpha1.Etcd) error { +func (e *ensurer) EnsureETCD(ctx context.Context, gctx gcontext.GardenContext, new, old *druidv1alpha1.Etcd) error { capacity := resource.MustParse("10Gi") class := "" diff --git a/pkg/webhook/controlplaneexposure/ensurer_test.go b/pkg/webhook/controlplaneexposure/ensurer_test.go index 1772b0da4..bf215561b 100644 --- a/pkg/webhook/controlplaneexposure/ensurer_test.go +++ b/pkg/webhook/controlplaneexposure/ensurer_test.go @@ -19,16 +19,16 @@ import ( "encoding/json" "testing" - druidv1alpha1 "github.com/gardener/etcd-druid/api/v1alpha1" "github.com/gardener/gardener-extension-provider-gcp/pkg/apis/config" + + druidv1alpha1 "github.com/gardener/etcd-druid/api/v1alpha1" extensionswebhook "github.com/gardener/gardener/extensions/pkg/webhook" - "github.com/gardener/gardener/extensions/pkg/webhook/controlplane/genericmutator" + gcontext "github.com/gardener/gardener/extensions/pkg/webhook/context" gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1" v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants" extensionsv1alpha1 "github.com/gardener/gardener/pkg/apis/extensions/v1alpha1" mockclient "github.com/gardener/gardener/pkg/mock/controller-runtime/client" "github.com/gardener/gardener/pkg/utils" - "github.com/golang/mock/gomock" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" @@ -60,7 +60,7 @@ var _ = Describe("Ensurer", func() { ctrl *gomock.Controller - dummyContext = genericmutator.NewEnsurerContext(nil, nil) + dummyContext = gcontext.NewGardenContext(nil, nil) svcKey = client.ObjectKey{Namespace: namespace, Name: v1beta1constants.DeploymentNameKubeAPIServer} svc = &corev1.Service{ diff --git a/vendor/github.com/gardener/gardener/extensions/pkg/controller/worker/genericactuator/actuator_reconcile.go b/vendor/github.com/gardener/gardener/extensions/pkg/controller/worker/genericactuator/actuator_reconcile.go index f350324e3..48fac8f00 100644 --- a/vendor/github.com/gardener/gardener/extensions/pkg/controller/worker/genericactuator/actuator_reconcile.go +++ b/vendor/github.com/gardener/gardener/extensions/pkg/controller/worker/genericactuator/actuator_reconcile.go @@ -340,7 +340,6 @@ func (a *genericActuator) deployMachineDeployments(ctx context.Context, logger l // available by the machine-controller-manager. It polls the status every 5 seconds. 
func (a *genericActuator) waitUntilWantedMachineDeploymentsAvailable(ctx context.Context, logger logr.Logger, cluster *extensionscontroller.Cluster, worker *extensionsv1alpha1.Worker, alreadyExistingMachineDeploymentNames sets.String, alreadyExistingMachineClassNames sets.String, wantedMachineDeployments extensionsworker.MachineDeployments, clusterAutoscalerUsed bool) error { logger.Info("Waiting until wanted machine deployments are available") - autoscalerIsScaledDown := false workerStatusUpdatedForRollingUpdate := false return retryutils.UntilTimeout(ctx, 5*time.Second, 5*time.Minute, func(ctx context.Context) (bool, error) { @@ -427,14 +426,6 @@ func (a *genericActuator) waitUntilWantedMachineDeploymentsAvailable(ctx context return retryutils.Ok() } - // scale down cluster autoscaler during creation or rolling update - if clusterAutoscalerUsed && !autoscalerIsScaledDown { - if err := a.scaleClusterAutoscaler(ctx, logger, worker, 0); err != nil { - return retryutils.SevereError(err) - } - autoscalerIsScaledDown = true - } - // update worker status with condition that indicates an ongoing rolling update operation if !workerStatusUpdatedForRollingUpdate { if err := a.updateWorkerStatusMachineDeployments(ctx, worker, extensionsworker.MachineDeployments{}, true); err != nil { diff --git a/vendor/github.com/gardener/gardener/extensions/pkg/terraformer/config.go b/vendor/github.com/gardener/gardener/extensions/pkg/terraformer/config.go index 744cd188f..fbea07136 100644 --- a/vendor/github.com/gardener/gardener/extensions/pkg/terraformer/config.go +++ b/vendor/github.com/gardener/gardener/extensions/pkg/terraformer/config.go @@ -62,6 +62,12 @@ func (t *terraformer) SetDeadlinePod(d time.Duration) Terraformer { return t } +// SetOwnerRef configures the resource that will be used as owner of the secrets and configmaps +func (t *terraformer) SetOwnerRef(owner *metav1.OwnerReference) Terraformer { + t.ownerRef = owner + return t +} + // UseV2 configures if it should use 
flags compatible with terraformer@v2. func (t *terraformer) UseV2(v2 bool) Terraformer { t.useV2 = v2 @@ -104,7 +110,7 @@ func (t *terraformer) initializerConfig(ctx context.Context) *InitializerConfig // Initializer to correctly create all the resources as specified in the given InitializerConfig. // A default implementation can be found in DefaultInitializer. func (t *terraformer) InitializeWith(ctx context.Context, initializer Initializer) Terraformer { - if err := initializer.Initialize(ctx, t.initializerConfig(ctx)); err != nil { + if err := initializer.Initialize(ctx, t.initializerConfig(ctx), t.ownerRef); err != nil { t.logger.Error(err, "Could not create Terraformer ConfigMaps/Secrets") return t } @@ -112,8 +118,13 @@ func (t *terraformer) InitializeWith(ctx context.Context, initializer Initialize return t } -func createOrUpdateConfigMap(ctx context.Context, c client.Client, namespace, name string, values map[string]string) (*corev1.ConfigMap, error) { - configMap := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name}} +func createOrUpdateConfigMap(ctx context.Context, c client.Client, namespace, name string, values map[string]string, ownerRef *metav1.OwnerReference) (*corev1.ConfigMap, error) { + configMap := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: name, + }, + } _, err := controllerutil.CreateOrUpdate(ctx, c, configMap, func() error { if configMap.Data == nil { configMap.Data = make(map[string]string) @@ -121,6 +132,9 @@ func createOrUpdateConfigMap(ctx context.Context, c client.Client, namespace, na for key, value := range values { configMap.Data[key] = value } + if ownerRef != nil { + configMap.SetOwnerReferences(kutil.MergeOwnerReferences(configMap.OwnerReferences, *ownerRef)) + } return nil }) return configMap, err @@ -128,48 +142,64 @@ func createOrUpdateConfigMap(ctx context.Context, c client.Client, namespace, na // CreateOrUpdateConfigurationConfigMap creates or updates the 
Terraform configuration ConfigMap // with the given main and variables content. -func CreateOrUpdateConfigurationConfigMap(ctx context.Context, c client.Client, namespace, name, main, variables string) (*corev1.ConfigMap, error) { - return createOrUpdateConfigMap(ctx, c, namespace, name, map[string]string{ - MainKey: main, - VariablesKey: variables, - }) +func CreateOrUpdateConfigurationConfigMap(ctx context.Context, c client.Client, namespace, name, main, variables string, ownerRef *metav1.OwnerReference) (*corev1.ConfigMap, error) { + return createOrUpdateConfigMap( + ctx, + c, + namespace, + name, + map[string]string{ + MainKey: main, + VariablesKey: variables, + }, + ownerRef, + ) } // CreateOrUpdateTFVarsSecret creates or updates the Terraformer variables Secret with the given tfvars. -func CreateOrUpdateTFVarsSecret(ctx context.Context, c client.Client, namespace, name string, tfvars []byte) (*corev1.Secret, error) { - secret := &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name}} +func CreateOrUpdateTFVarsSecret(ctx context.Context, c client.Client, namespace, name string, tfvars []byte, ownerRef *metav1.OwnerReference) (*corev1.Secret, error) { + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: name, + }, + } + _, err := controllerutil.CreateOrUpdate(ctx, c, secret, func() error { if secret.Data == nil { secret.Data = make(map[string][]byte) } secret.Data[TFVarsKey] = tfvars + if ownerRef != nil { + secret.SetOwnerReferences(kutil.MergeOwnerReferences(secret.OwnerReferences, *ownerRef)) + } return nil }) return secret, err } // initializerFunc implements Initializer. -type initializerFunc func(ctx context.Context, config *InitializerConfig) error +type initializerFunc func(ctx context.Context, config *InitializerConfig, ownerRef *metav1.OwnerReference) error // Initialize implements Initializer. 
-func (f initializerFunc) Initialize(ctx context.Context, config *InitializerConfig) error { - return f(ctx, config) +func (f initializerFunc) Initialize(ctx context.Context, config *InitializerConfig, ownerRef *metav1.OwnerReference) error { + return f(ctx, config, ownerRef) } // DefaultInitializer is an Initializer that initializes the configuration, variables and state resources // based on the given main, variables and tfvars content and on the given InitializerConfig. func DefaultInitializer(c client.Client, main, variables string, tfvars []byte, stateInitializer StateConfigMapInitializer) Initializer { - return initializerFunc(func(ctx context.Context, config *InitializerConfig) error { - if _, err := CreateOrUpdateConfigurationConfigMap(ctx, c, config.Namespace, config.ConfigurationName, main, variables); err != nil { + return initializerFunc(func(ctx context.Context, config *InitializerConfig, ownerRef *metav1.OwnerReference) error { + if _, err := CreateOrUpdateConfigurationConfigMap(ctx, c, config.Namespace, config.ConfigurationName, main, variables, ownerRef); err != nil { return err } - if _, err := CreateOrUpdateTFVarsSecret(ctx, c, config.Namespace, config.VariablesName, tfvars); err != nil { + if _, err := CreateOrUpdateTFVarsSecret(ctx, c, config.Namespace, config.VariablesName, tfvars, ownerRef); err != nil { return err } if config.InitializeState { - if err := stateInitializer.Initialize(ctx, c, config.Namespace, config.StateName); err != nil { + if err := stateInitializer.Initialize(ctx, c, config.Namespace, config.StateName, ownerRef); err != nil { return err } } diff --git a/vendor/github.com/gardener/gardener/extensions/pkg/terraformer/raw_state.go b/vendor/github.com/gardener/gardener/extensions/pkg/terraformer/raw_state.go index 5b7ba4611..8d1bd5b5b 100644 --- a/vendor/github.com/gardener/gardener/extensions/pkg/terraformer/raw_state.go +++ b/vendor/github.com/gardener/gardener/extensions/pkg/terraformer/raw_state.go @@ -30,7 +30,7 @@ func 
(trs *RawState) Marshal() ([]byte, error) { return json.Marshal(trs.encodeBase64()) } -// GetRawState returns the conten of terraform state config map +// GetRawState returns the content of terraform state config map func (t *terraformer) GetRawState(ctx context.Context) (*RawState, error) { configMap := &corev1.ConfigMap{} if err := t.client.Get(ctx, kutil.Key(t.namespace, t.stateName), configMap); err != nil { @@ -72,13 +72,13 @@ func UnmarshalRawState(rawState interface{}) (*RawState, error) { // buildRawState returns RawState from byte slice func buildRawState(terraformRawState []byte) (*RawState, error) { - trs := &RawState{} + trs := &RawState{ + Data: "", + Encoding: NoneEncoding, + } if terraformRawState == nil { - return &RawState{ - Data: "", - Encoding: NoneEncoding, - }, nil + return trs, nil } if err := json.Unmarshal(terraformRawState, trs); err != nil { diff --git a/vendor/github.com/gardener/gardener/extensions/pkg/terraformer/state.go b/vendor/github.com/gardener/gardener/extensions/pkg/terraformer/state.go index c5917a8ad..feb7b296b 100644 --- a/vendor/github.com/gardener/gardener/extensions/pkg/terraformer/state.go +++ b/vendor/github.com/gardener/gardener/extensions/pkg/terraformer/state.go @@ -164,13 +164,13 @@ func sniffJSONStateVersion(stateConfigMap []byte) (uint64, error) { } // Initialize implements StateConfigMapInitializer -func (f StateConfigMapInitializerFunc) Initialize(ctx context.Context, c client.Client, namespace, name string) error { - return f(ctx, c, namespace, name) +func (f StateConfigMapInitializerFunc) Initialize(ctx context.Context, c client.Client, namespace, name string, ownerRef *metav1.OwnerReference) error { + return f(ctx, c, namespace, name, ownerRef) } // CreateState create terraform state config map and use empty state. -// It does not create or update state ConfigMap if already exists. 
-func CreateState(ctx context.Context, c client.Client, namespace, name string) error { +// It does not create or update state ConfigMap if already exists. +func CreateState(ctx context.Context, c client.Client, namespace, name string, ownerRef *metav1.OwnerReference) error { configMap := &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name}, Data: map[string]string{ @@ -178,6 +178,10 @@ func CreateState(ctx context.Context, c client.Client, namespace, name string) e }, } + if ownerRef != nil { + configMap.SetOwnerReferences(kutil.MergeOwnerReferences(configMap.OwnerReferences, *ownerRef)) + } + if err := c.Create(ctx, configMap); err != nil && !apierrors.IsAlreadyExists(err) { return err } @@ -186,7 +190,7 @@ func CreateState(ctx context.Context, c client.Client, namespace, name string) e } // Initialize implements StateConfigMapInitializer -func (cus CreateOrUpdateState) Initialize(ctx context.Context, c client.Client, namespace, name string) error { +func (cus CreateOrUpdateState) Initialize(ctx context.Context, c client.Client, namespace, name string, ownerRef *metav1.OwnerReference) error { if cus.State == nil { return fmt.Errorf("missing state when creating or updating terraform state ConfigMap %s/%s", namespace, name) } @@ -197,6 +201,10 @@ func (cus CreateOrUpdateState) Initialize(ctx context.Context, c client.Client, configMap.Data = make(map[string]string) } configMap.Data[StateKey] = *cus.State + + if ownerRef != nil { + configMap.SetOwnerReferences(kutil.MergeOwnerReferences(configMap.OwnerReferences, *ownerRef)) + } return nil }) diff --git a/vendor/github.com/gardener/gardener/extensions/pkg/terraformer/terraformer.go b/vendor/github.com/gardener/gardener/extensions/pkg/terraformer/terraformer.go index 05c193733..291e6de17 100644 --- a/vendor/github.com/gardener/gardener/extensions/pkg/terraformer/terraformer.go +++ b/vendor/github.com/gardener/gardener/extensions/pkg/terraformer/terraformer.go @@ -187,6 +187,12 @@ func (t 
*terraformer) execute(ctx context.Context, command string) error { } if !skipApplyOrDestroyPod { + // TODO: remove after several releases + // ensure ownerRef for already existing state configmaps + if err := t.ensureStateHasOwnerRef(ctx); err != nil { + return fmt.Errorf("failed to ensure owner reference for the state configmap: %w", err) + } + // Create Terraform Pod which executes the provided command generateName := t.computePodGenerateName(command) @@ -297,6 +303,25 @@ func (t *terraformer) createOrUpdateTerraformerAuth(ctx context.Context) error { return t.createOrUpdateRoleBinding(ctx) } +func (t *terraformer) ensureStateHasOwnerRef(ctx context.Context) error { + if t.ownerRef == nil { + return nil + } + + configMap := &corev1.ConfigMap{} + if err := t.client.Get(ctx, kutils.Key(t.namespace, t.stateName), configMap); err != nil { + return err + } + + oldConfigMap := configMap.DeepCopy() + configMap.SetOwnerReferences(kutils.MergeOwnerReferences(configMap.OwnerReferences, *t.ownerRef)) + + return t.client.Patch(ctx, configMap, client.MergeFromWithOptions( + oldConfigMap, + client.MergeFromWithOptimisticLock{}, + )) +} + func (t *terraformer) deployTerraformerPod(ctx context.Context, generateName, command string) (*corev1.Pod, error) { if err := t.createOrUpdateTerraformerAuth(ctx); err != nil { return nil, err diff --git a/vendor/github.com/gardener/gardener/extensions/pkg/terraformer/types.go b/vendor/github.com/gardener/gardener/extensions/pkg/terraformer/types.go index cd40dcfc4..1c448ec24 100644 --- a/vendor/github.com/gardener/gardener/extensions/pkg/terraformer/types.go +++ b/vendor/github.com/gardener/gardener/extensions/pkg/terraformer/types.go @@ -20,6 +20,7 @@ import ( "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" corev1client "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" @@ -30,6 +31,7 @@ import ( // * purpose is a one-word 
description depicting what the Terraformer does (e.g. 'infrastructure'). // * namespace is the namespace in which the Terraformer will act. // * image is the Docker image name of the Terraformer image. +// * ownerRef is the resource that owns the secrets and configmaps used by Terraformer. // * configName is the name of the ConfigMap containing the main Terraform file ('main.tf'). // * variablesName is the name of the Secret containing the Terraform variables ('terraform.tfvars'). // * stateName is the name of the ConfigMap containing the Terraform state ('terraform.tfstate'). @@ -54,6 +56,7 @@ type terraformer struct { name string namespace string image string + ownerRef *metav1.OwnerReference configName string variablesName string @@ -101,6 +104,7 @@ type Terraformer interface { SetTerminationGracePeriodSeconds(int64) Terraformer SetDeadlineCleaning(time.Duration) Terraformer SetDeadlinePod(time.Duration) Terraformer + SetOwnerRef(*metav1.OwnerReference) Terraformer InitializeWith(ctx context.Context, initializer Initializer) Terraformer Apply(ctx context.Context) error Destroy(ctx context.Context) error @@ -117,7 +121,7 @@ type Terraformer interface { // Initializer can initialize a Terraformer. type Initializer interface { - Initialize(ctx context.Context, config *InitializerConfig) error + Initialize(ctx context.Context, config *InitializerConfig, ownerRef *metav1.OwnerReference) error } // Factory is a factory that can produce Terraformer and Initializer. 
@@ -129,11 +133,11 @@ type Factory interface { // StateConfigMapInitializer initialize terraformer state ConfigMap type StateConfigMapInitializer interface { - Initialize(ctx context.Context, c client.Client, namespace, name string) error + Initialize(ctx context.Context, c client.Client, namespace, name string, ownerRef *metav1.OwnerReference) error } // StateConfigMapInitializerFunc implements StateConfigMapInitializer -type StateConfigMapInitializerFunc func(ctx context.Context, c client.Client, namespace, name string) error +type StateConfigMapInitializerFunc func(ctx context.Context, c client.Client, namespace, name string, ownerRef *metav1.OwnerReference) error // CreateOrUpdateState implements StateConfigMapInitializer. // It use it field state for creating or updating the state ConfigMap diff --git a/vendor/github.com/gardener/gardener/extensions/pkg/util/shoot.go b/vendor/github.com/gardener/gardener/extensions/pkg/util/shoot.go index 21b1b6aa6..3f55bcb60 100644 --- a/vendor/github.com/gardener/gardener/extensions/pkg/util/shoot.go +++ b/vendor/github.com/gardener/gardener/extensions/pkg/util/shoot.go @@ -39,7 +39,7 @@ const CAChecksumAnnotation = "checksum/ca" // If the CA of an existing Kubeconfig has changed, it creates a new Kubeconfig. // Newly generated Kubeconfigs are applied with the given `client` to the given `namespace`. 
func GetOrCreateShootKubeconfig(ctx context.Context, c client.Client, certificateConfig secrets.CertificateSecretConfig, namespace string) (*corev1.Secret, error) { - caSecret, ca, err := secrets.LoadCAFromSecret(c, namespace, v1beta1constants.SecretNameCACluster) + caSecret, ca, err := secrets.LoadCAFromSecret(ctx, c, namespace, v1beta1constants.SecretNameCACluster) if err != nil { return nil, fmt.Errorf("error fetching CA secret %s/%s: %v", namespace, v1beta1constants.SecretNameCACluster, err) } diff --git a/vendor/github.com/gardener/gardener/extensions/pkg/webhook/context/context.go b/vendor/github.com/gardener/gardener/extensions/pkg/webhook/context/context.go new file mode 100644 index 000000000..4aa26c6b5 --- /dev/null +++ b/vendor/github.com/gardener/gardener/extensions/pkg/webhook/context/context.go @@ -0,0 +1,63 @@ +// Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package context + +import ( + "context" + + extensionscontroller "github.com/gardener/gardener/extensions/pkg/controller" + + "github.com/pkg/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// GardenContext wraps the actual context and cluster object. 
+type GardenContext interface { + GetCluster(ctx context.Context) (*extensionscontroller.Cluster, error) +} + +type gardenContext struct { + client client.Client + object metav1.Object + cluster *extensionscontroller.Cluster +} + +// NewGardenContext creates a context object. +func NewGardenContext(client client.Client, object metav1.Object) GardenContext { + return &gardenContext{ + client: client, + object: object, + } +} + +// NewInternalGardenContext creates a context object from a Cluster object. +func NewInternalGardenContext(cluster *extensionscontroller.Cluster) GardenContext { + return &gardenContext{ + cluster: cluster, + } +} + +// GetCluster returns the Cluster object. +func (c *gardenContext) GetCluster(ctx context.Context) (*extensionscontroller.Cluster, error) { + if c.cluster == nil { + cluster, err := extensionscontroller.GetCluster(ctx, c.client, c.object.GetNamespace()) + if err != nil { + return nil, errors.Wrapf(err, "could not get cluster for namespace '%s'", c.object.GetNamespace()) + } + c.cluster = cluster + } + return c.cluster, nil +} diff --git a/vendor/github.com/gardener/gardener/extensions/pkg/webhook/controlplane/genericmutator/mutator.go b/vendor/github.com/gardener/gardener/extensions/pkg/webhook/controlplane/genericmutator/mutator.go index d483e09b8..6ca894442 100644 --- a/vendor/github.com/gardener/gardener/extensions/pkg/webhook/controlplane/genericmutator/mutator.go +++ b/vendor/github.com/gardener/gardener/extensions/pkg/webhook/controlplane/genericmutator/mutator.go @@ -17,9 +17,9 @@ package genericmutator import ( "context" - extensionscontroller "github.com/gardener/gardener/extensions/pkg/controller" "github.com/gardener/gardener/extensions/pkg/controller/operatingsystemconfig/oscommon/cloudinit" extensionswebhook "github.com/gardener/gardener/extensions/pkg/webhook" + gcontext "github.com/gardener/gardener/extensions/pkg/webhook/context" "github.com/gardener/gardener/extensions/pkg/webhook/controlplane" v1beta1constants 
"github.com/gardener/gardener/pkg/apis/core/v1beta1/constants" extensionsv1alpha1 "github.com/gardener/gardener/pkg/apis/extensions/v1alpha1" @@ -39,47 +39,42 @@ import ( "sigs.k8s.io/controller-runtime/pkg/runtime/inject" ) -// EnsurerContext wraps the actual context and cluster object. -type EnsurerContext interface { - GetCluster(ctx context.Context) (*extensionscontroller.Cluster, error) -} - // Ensurer ensures that various standard Kubernets controlplane objects conform to the provider requirements. // If they don't initially, they are mutated accordingly. type Ensurer interface { // EnsureKubeAPIServerService ensures that the kube-apiserver service conforms to the provider requirements. // "old" might be "nil" and must always be checked. - EnsureKubeAPIServerService(ctx context.Context, etcx EnsurerContext, new, old *corev1.Service) error + EnsureKubeAPIServerService(ctx context.Context, gctx gcontext.GardenContext, new, old *corev1.Service) error // EnsureKubeAPIServerDeployment ensures that the kube-apiserver deployment conforms to the provider requirements. // "old" might be "nil" and must always be checked. - EnsureKubeAPIServerDeployment(ctx context.Context, etcx EnsurerContext, new, old *appsv1.Deployment) error + EnsureKubeAPIServerDeployment(ctx context.Context, gctx gcontext.GardenContext, new, old *appsv1.Deployment) error // EnsureKubeControllerManagerDeployment ensures that the kube-controller-manager deployment conforms to the provider requirements. // "old" might be "nil" and must always be checked. - EnsureKubeControllerManagerDeployment(ctx context.Context, etcx EnsurerContext, new, old *appsv1.Deployment) error + EnsureKubeControllerManagerDeployment(ctx context.Context, gctx gcontext.GardenContext, new, old *appsv1.Deployment) error // EnsureKubeSchedulerDeployment ensures that the kube-scheduler deployment conforms to the provider requirements. // "old" might be "nil" and must always be checked. 
- EnsureKubeSchedulerDeployment(ctx context.Context, etcx EnsurerContext, new, old *appsv1.Deployment) error + EnsureKubeSchedulerDeployment(ctx context.Context, gctx gcontext.GardenContext, new, old *appsv1.Deployment) error // EnsureETCD ensures that the etcds conform to the respective provider requirements. // "old" might be "nil" and must always be checked. - EnsureETCD(ctx context.Context, etcx EnsurerContext, new, old *druidv1alpha1.Etcd) error + EnsureETCD(ctx context.Context, gctx gcontext.GardenContext, new, old *druidv1alpha1.Etcd) error // EnsureKubeletServiceUnitOptions ensures that the kubelet.service unit options conform to the provider requirements. - EnsureKubeletServiceUnitOptions(ctx context.Context, etcx EnsurerContext, new, old []*unit.UnitOption) ([]*unit.UnitOption, error) + EnsureKubeletServiceUnitOptions(ctx context.Context, gctx gcontext.GardenContext, new, old []*unit.UnitOption) ([]*unit.UnitOption, error) // EnsureKubeletConfiguration ensures that the kubelet configuration conforms to the provider requirements. // "old" might be "nil" and must always be checked. - EnsureKubeletConfiguration(ctx context.Context, etcx EnsurerContext, new, old *kubeletconfigv1beta1.KubeletConfiguration) error + EnsureKubeletConfiguration(ctx context.Context, gctx gcontext.GardenContext, new, old *kubeletconfigv1beta1.KubeletConfiguration) error // EnsureKubernetesGeneralConfiguration ensures that the kubernetes general configuration conforms to the provider requirements. // "old" might be "nil" and must always be checked. - EnsureKubernetesGeneralConfiguration(ctx context.Context, etcx EnsurerContext, new, old *string) error + EnsureKubernetesGeneralConfiguration(ctx context.Context, gctx gcontext.GardenContext, new, old *string) error // ShouldProvisionKubeletCloudProviderConfig returns true if the cloud provider config file should be added to the kubelet configuration. 
- ShouldProvisionKubeletCloudProviderConfig(ctx context.Context, etcx EnsurerContext) bool + ShouldProvisionKubeletCloudProviderConfig(ctx context.Context, gctx gcontext.GardenContext) bool // EnsureKubeletCloudProviderConfig ensures that the cloud provider config file content conforms to the provider requirements. - EnsureKubeletCloudProviderConfig(context.Context, EnsurerContext, *string, string) error + EnsureKubeletCloudProviderConfig(context.Context, gcontext.GardenContext, *string, string) error // EnsureAdditionalUnits ensures additional systemd units // "old" might be "nil" and must always be checked. - EnsureAdditionalUnits(ctx context.Context, etcx EnsurerContext, new, old *[]extensionsv1alpha1.Unit) error + EnsureAdditionalUnits(ctx context.Context, gctx gcontext.GardenContext, new, old *[]extensionsv1alpha1.Unit) error // EnsureAdditionalFile ensures additional systemd files // "old" might be "nil" and must always be checked. - EnsureAdditionalFiles(ctx context.Context, etcx EnsurerContext, new, old *[]extensionsv1alpha1.File) error + EnsureAdditionalFiles(ctx context.Context, gctx gcontext.GardenContext, new, old *[]extensionsv1alpha1.File) error } // NewMutator creates a new controlplane mutator. @@ -118,39 +113,6 @@ func (m *mutator) InjectClient(client client.Client) error { return nil } -type ensurerContext struct { - client client.Client - object metav1.Object - cluster *extensionscontroller.Cluster -} - -// NewEnsurerContext creates an ensurer context object. -func NewEnsurerContext(client client.Client, object metav1.Object) EnsurerContext { - return &ensurerContext{ - client: client, - object: object, - } -} - -// NewInternalEnsurerContext creates an ensurer context object. -func NewInternalEnsurerContext(cluster *extensionscontroller.Cluster) EnsurerContext { - return &ensurerContext{ - cluster: cluster, - } -} - -// GetCluster returns the cluster object. 
-func (c *ensurerContext) GetCluster(ctx context.Context) (*extensionscontroller.Cluster, error) { - if c.cluster == nil { - cluster, err := extensionscontroller.GetCluster(ctx, c.client, c.object.GetNamespace()) - if err != nil { - return nil, errors.Wrapf(err, "could not get cluster for namespace '%s'", c.object.GetNamespace()) - } - c.cluster = cluster - } - return c.cluster, nil -} - // Mutate validates and if needed mutates the given object. func (m *mutator) Mutate(ctx context.Context, new, old runtime.Object) error { acc, err := meta.Accessor(new) @@ -165,7 +127,7 @@ func (m *mutator) Mutate(ctx context.Context, new, old runtime.Object) error { if !ok { return errors.Wrapf(err, "could not cast runtime object to metav1 object") } - ectx := NewEnsurerContext(m.client, o) + gctx := gcontext.NewGardenContext(m.client, o) switch x := new.(type) { case *corev1.Service: @@ -180,7 +142,7 @@ func (m *mutator) Mutate(ctx context.Context, new, old runtime.Object) error { } extensionswebhook.LogMutation(m.logger, x.Kind, x.Namespace, x.Name) - return m.ensurer.EnsureKubeAPIServerService(ctx, ectx, x, oldSvc) + return m.ensurer.EnsureKubeAPIServerService(ctx, gctx, x, oldSvc) } case *appsv1.Deployment: var oldDep *appsv1.Deployment @@ -194,13 +156,13 @@ func (m *mutator) Mutate(ctx context.Context, new, old runtime.Object) error { switch x.Name { case v1beta1constants.DeploymentNameKubeAPIServer: extensionswebhook.LogMutation(m.logger, x.Kind, x.Namespace, x.Name) - return m.ensurer.EnsureKubeAPIServerDeployment(ctx, ectx, x, oldDep) + return m.ensurer.EnsureKubeAPIServerDeployment(ctx, gctx, x, oldDep) case v1beta1constants.DeploymentNameKubeControllerManager: extensionswebhook.LogMutation(m.logger, x.Kind, x.Namespace, x.Name) - return m.ensurer.EnsureKubeControllerManagerDeployment(ctx, ectx, x, oldDep) + return m.ensurer.EnsureKubeControllerManagerDeployment(ctx, gctx, x, oldDep) case v1beta1constants.DeploymentNameKubeScheduler: 
extensionswebhook.LogMutation(m.logger, x.Kind, x.Namespace, x.Name) - return m.ensurer.EnsureKubeSchedulerDeployment(ctx, ectx, x, oldDep) + return m.ensurer.EnsureKubeSchedulerDeployment(ctx, gctx, x, oldDep) } case *druidv1alpha1.Etcd: switch x.Name { @@ -214,7 +176,7 @@ func (m *mutator) Mutate(ctx context.Context, new, old runtime.Object) error { } extensionswebhook.LogMutation(m.logger, x.Kind, x.Namespace, x.Name) - return m.ensurer.EnsureETCD(ctx, ectx, x, oldEtcd) + return m.ensurer.EnsureETCD(ctx, gctx, x, oldEtcd) } case *extensionsv1alpha1.OperatingSystemConfig: if x.Spec.Purpose == extensionsv1alpha1.OperatingSystemConfigPurposeReconcile { @@ -227,7 +189,7 @@ func (m *mutator) Mutate(ctx context.Context, new, old runtime.Object) error { } extensionswebhook.LogMutation(m.logger, x.Kind, x.Namespace, x.Name) - return m.mutateOperatingSystemConfig(ctx, ectx, x, oldOSC) + return m.mutateOperatingSystemConfig(ctx, gctx, x, oldOSC) } return nil } @@ -262,31 +224,31 @@ func findFileWithPath(osc *extensionsv1alpha1.OperatingSystemConfig, path string return nil } -func (m *mutator) mutateOperatingSystemConfig(ctx context.Context, ectx EnsurerContext, osc, oldOSC *extensionsv1alpha1.OperatingSystemConfig) error { +func (m *mutator) mutateOperatingSystemConfig(ctx context.Context, gctx gcontext.GardenContext, osc, oldOSC *extensionsv1alpha1.OperatingSystemConfig) error { // Mutate kubelet.service unit, if present if content := getKubeletService(osc); content != nil { - if err := m.ensureKubeletServiceUnitContent(ctx, ectx, content, getKubeletService(oldOSC)); err != nil { + if err := m.ensureKubeletServiceUnitContent(ctx, gctx, content, getKubeletService(oldOSC)); err != nil { return err } } // Mutate kubelet configuration file, if present if content := getKubeletConfigFile(osc); content != nil { - if err := m.ensureKubeletConfigFileContent(ctx, ectx, content, getKubeletConfigFile(oldOSC)); err != nil { + if err := m.ensureKubeletConfigFileContent(ctx, gctx, 
content, getKubeletConfigFile(oldOSC)); err != nil { return err } } // Mutate 99 kubernetes general configuration file, if present if content := getKubernetesGeneralConfiguration(osc); content != nil { - if err := m.ensureKubernetesGeneralConfiguration(ctx, ectx, content, getKubernetesGeneralConfiguration(oldOSC)); err != nil { + if err := m.ensureKubernetesGeneralConfiguration(ctx, gctx, content, getKubernetesGeneralConfiguration(oldOSC)); err != nil { return err } } // Check if cloud provider config needs to be ensured - if m.ensurer.ShouldProvisionKubeletCloudProviderConfig(ctx, ectx) { - if err := m.ensureKubeletCloudProviderConfig(ctx, ectx, osc); err != nil { + if m.ensurer.ShouldProvisionKubeletCloudProviderConfig(ctx, gctx) { + if err := m.ensureKubeletCloudProviderConfig(ctx, gctx, osc); err != nil { return err } } @@ -301,18 +263,18 @@ func (m *mutator) mutateOperatingSystemConfig(ctx context.Context, ectx EnsurerC oldUnits = &oldOSC.Spec.Units } - if err := m.ensurer.EnsureAdditionalFiles(ctx, ectx, &osc.Spec.Files, oldFiles); err != nil { + if err := m.ensurer.EnsureAdditionalFiles(ctx, gctx, &osc.Spec.Files, oldFiles); err != nil { return err } - if err := m.ensurer.EnsureAdditionalUnits(ctx, ectx, &osc.Spec.Units, oldUnits); err != nil { + if err := m.ensurer.EnsureAdditionalUnits(ctx, gctx, &osc.Spec.Units, oldUnits); err != nil { return err } return nil } -func (m *mutator) ensureKubeletServiceUnitContent(ctx context.Context, ectx EnsurerContext, content, oldContent *string) error { +func (m *mutator) ensureKubeletServiceUnitContent(ctx context.Context, gctx gcontext.GardenContext, content, oldContent *string) error { var ( opts, oldOpts []*unit.UnitOption err error @@ -330,7 +292,7 @@ func (m *mutator) ensureKubeletServiceUnitContent(ctx context.Context, ectx Ensu } } - if opts, err = m.ensurer.EnsureKubeletServiceUnitOptions(ctx, ectx, opts, oldOpts); err != nil { + if opts, err = m.ensurer.EnsureKubeletServiceUnitOptions(ctx, gctx, opts, 
oldOpts); err != nil { return err } @@ -342,7 +304,7 @@ func (m *mutator) ensureKubeletServiceUnitContent(ctx context.Context, ectx Ensu return nil } -func (m *mutator) ensureKubeletConfigFileContent(ctx context.Context, ectx EnsurerContext, fci, oldFCI *extensionsv1alpha1.FileContentInline) error { +func (m *mutator) ensureKubeletConfigFileContent(ctx context.Context, gctx gcontext.GardenContext, fci, oldFCI *extensionsv1alpha1.FileContentInline) error { var ( kubeletConfig, oldKubeletConfig *kubeletconfigv1beta1.KubeletConfiguration err error @@ -360,7 +322,7 @@ func (m *mutator) ensureKubeletConfigFileContent(ctx context.Context, ectx Ensur } } - if err = m.ensurer.EnsureKubeletConfiguration(ctx, ectx, kubeletConfig, oldKubeletConfig); err != nil { + if err = m.ensurer.EnsureKubeletConfiguration(ctx, gctx, kubeletConfig, oldKubeletConfig); err != nil { return err } @@ -374,7 +336,7 @@ func (m *mutator) ensureKubeletConfigFileContent(ctx context.Context, ectx Ensur return nil } -func (m *mutator) ensureKubernetesGeneralConfiguration(ctx context.Context, ectx EnsurerContext, fci, oldFCI *extensionsv1alpha1.FileContentInline) error { +func (m *mutator) ensureKubernetesGeneralConfiguration(ctx context.Context, gctx gcontext.GardenContext, fci, oldFCI *extensionsv1alpha1.FileContentInline) error { var ( data, oldData []byte err error @@ -394,7 +356,7 @@ func (m *mutator) ensureKubernetesGeneralConfiguration(ctx context.Context, ectx s := string(data) oldS := string(oldData) - if err = m.ensurer.EnsureKubernetesGeneralConfiguration(ctx, ectx, &s, &oldS); err != nil { + if err = m.ensurer.EnsureKubernetesGeneralConfiguration(ctx, gctx, &s, &oldS); err != nil { return err } @@ -410,12 +372,12 @@ func (m *mutator) ensureKubernetesGeneralConfiguration(ctx context.Context, ectx const CloudProviderConfigPath = "/var/lib/kubelet/cloudprovider.conf" -func (m *mutator) ensureKubeletCloudProviderConfig(ctx context.Context, ectx EnsurerContext, osc 
*extensionsv1alpha1.OperatingSystemConfig) error { +func (m *mutator) ensureKubeletCloudProviderConfig(ctx context.Context, gctx gcontext.GardenContext, osc *extensionsv1alpha1.OperatingSystemConfig) error { var err error // Ensure kubelet cloud provider config var s string - if err = m.ensurer.EnsureKubeletCloudProviderConfig(ctx, ectx, &s, osc.Namespace); err != nil { + if err = m.ensurer.EnsureKubeletCloudProviderConfig(ctx, gctx, &s, osc.Namespace); err != nil { return err } diff --git a/vendor/github.com/gardener/gardener/extensions/pkg/webhook/controlplane/genericmutator/noopensurer.go b/vendor/github.com/gardener/gardener/extensions/pkg/webhook/controlplane/genericmutator/noopensurer.go index 928cb0b67..92bd0439e 100644 --- a/vendor/github.com/gardener/gardener/extensions/pkg/webhook/controlplane/genericmutator/noopensurer.go +++ b/vendor/github.com/gardener/gardener/extensions/pkg/webhook/controlplane/genericmutator/noopensurer.go @@ -19,6 +19,7 @@ import ( "github.com/coreos/go-systemd/v22/unit" druidv1alpha1 "github.com/gardener/etcd-druid/api/v1alpha1" + gcontext "github.com/gardener/gardener/extensions/pkg/webhook/context" extensionsv1alpha1 "github.com/gardener/gardener/pkg/apis/extensions/v1alpha1" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -31,61 +32,61 @@ type NoopEnsurer struct{} var _ Ensurer = &NoopEnsurer{} // EnsureKubeAPIServerService ensures that the kube-apiserver service conforms to the provider requirements. -func (e *NoopEnsurer) EnsureKubeAPIServerService(ctx context.Context, ectx EnsurerContext, new, old *corev1.Service) error { +func (e *NoopEnsurer) EnsureKubeAPIServerService(ctx context.Context, gctx gcontext.GardenContext, new, old *corev1.Service) error { return nil } // EnsureKubeAPIServerDeployment ensures that the kube-apiserver deployment conforms to the provider requirements. 
-func (e *NoopEnsurer) EnsureKubeAPIServerDeployment(ctx context.Context, ectx EnsurerContext, new, old *appsv1.Deployment) error { +func (e *NoopEnsurer) EnsureKubeAPIServerDeployment(ctx context.Context, gctx gcontext.GardenContext, new, old *appsv1.Deployment) error { return nil } // EnsureKubeControllerManagerDeployment ensures that the kube-controller-manager deployment conforms to the provider requirements. -func (e *NoopEnsurer) EnsureKubeControllerManagerDeployment(ctx context.Context, ectx EnsurerContext, new, old *appsv1.Deployment) error { +func (e *NoopEnsurer) EnsureKubeControllerManagerDeployment(ctx context.Context, gctx gcontext.GardenContext, new, old *appsv1.Deployment) error { return nil } // EnsureKubeSchedulerDeployment ensures that the kube-scheduler deployment conforms to the provider requirements. -func (e *NoopEnsurer) EnsureKubeSchedulerDeployment(ctx context.Context, ectx EnsurerContext, new, old *appsv1.Deployment) error { +func (e *NoopEnsurer) EnsureKubeSchedulerDeployment(ctx context.Context, gctx gcontext.GardenContext, new, old *appsv1.Deployment) error { return nil } // EnsureETCD ensures that the etcd stateful sets conform to the provider requirements. -func (e *NoopEnsurer) EnsureETCD(ctx context.Context, ectx EnsurerContext, new, old *druidv1alpha1.Etcd) error { +func (e *NoopEnsurer) EnsureETCD(ctx context.Context, gctx gcontext.GardenContext, new, old *druidv1alpha1.Etcd) error { return nil } // EnsureKubeletServiceUnitOptions ensures that the kubelet.service unit options conform to the provider requirements. 
-func (e *NoopEnsurer) EnsureKubeletServiceUnitOptions(ctx context.Context, ectx EnsurerContext, new, old []*unit.UnitOption) ([]*unit.UnitOption, error) { +func (e *NoopEnsurer) EnsureKubeletServiceUnitOptions(ctx context.Context, gctx gcontext.GardenContext, new, old []*unit.UnitOption) ([]*unit.UnitOption, error) { return new, nil } // EnsureKubeletConfiguration ensures that the kubelet configuration conforms to the provider requirements. -func (e *NoopEnsurer) EnsureKubeletConfiguration(ctx context.Context, etcx EnsurerContext, new, old *kubeletconfigv1beta1.KubeletConfiguration) error { +func (e *NoopEnsurer) EnsureKubeletConfiguration(ctx context.Context, gctx gcontext.GardenContext, new, old *kubeletconfigv1beta1.KubeletConfiguration) error { return nil } // EnsureKubernetesGeneralConfiguration ensures that the kubernetes general configuration conforms to the provider requirements. -func (e *NoopEnsurer) EnsureKubernetesGeneralConfiguration(ctx context.Context, etcx EnsurerContext, new, old *string) error { +func (e *NoopEnsurer) EnsureKubernetesGeneralConfiguration(ctx context.Context, gctx gcontext.GardenContext, new, old *string) error { return nil } // ShouldProvisionKubeletCloudProviderConfig returns if the cloud provider config file should be added to the kubelet configuration. -func (e *NoopEnsurer) ShouldProvisionKubeletCloudProviderConfig(context.Context, EnsurerContext) bool { +func (e *NoopEnsurer) ShouldProvisionKubeletCloudProviderConfig(context.Context, gcontext.GardenContext) bool { return false } // EnsureKubeletCloudProviderConfig ensures that the cloud provider config file conforms to the provider requirements. -func (e *NoopEnsurer) EnsureKubeletCloudProviderConfig(context.Context, EnsurerContext, *string, string) error { +func (e *NoopEnsurer) EnsureKubeletCloudProviderConfig(context.Context, gcontext.GardenContext, *string, string) error { return nil } // EnsureAdditionalUnits ensures that additional required system units are added. 
-func (e *NoopEnsurer) EnsureAdditionalUnits(ctx context.Context, ectx EnsurerContext, new, old *[]extensionsv1alpha1.Unit) error { +func (e *NoopEnsurer) EnsureAdditionalUnits(ctx context.Context, gctx gcontext.GardenContext, new, old *[]extensionsv1alpha1.Unit) error { return nil } // EnsureAdditionalFiles ensures that additional required system files are added. -func (e *NoopEnsurer) EnsureAdditionalFiles(ctx context.Context, ectx EnsurerContext, new, old *[]extensionsv1alpha1.File) error { +func (e *NoopEnsurer) EnsureAdditionalFiles(ctx context.Context, gctx gcontext.GardenContext, new, old *[]extensionsv1alpha1.File) error { return nil } diff --git a/vendor/github.com/gardener/gardener/extensions/pkg/webhook/registration.go b/vendor/github.com/gardener/gardener/extensions/pkg/webhook/registration.go index c576df0a3..4e3ac2d3c 100644 --- a/vendor/github.com/gardener/gardener/extensions/pkg/webhook/registration.go +++ b/vendor/github.com/gardener/gardener/extensions/pkg/webhook/registration.go @@ -19,10 +19,15 @@ import ( "fmt" "strings" + "github.com/gardener/gardener/pkg/utils/kubernetes" + "github.com/pkg/errors" admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/utils/pointer" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/manager" @@ -72,7 +77,20 @@ func RegisterWebhooks(ctx context.Context, mgr manager.Manager, namespace, provi return nil, nil, err } + var ownerReference *metav1.OwnerReference + if len(namespace) > 0 { + ns := &corev1.Namespace{} + if err := c.Get(ctx, client.ObjectKey{Name: namespace}, ns); err != nil { + return nil, nil, err + } + ownerReference = metav1.NewControllerRef(ns, corev1.SchemeGroupVersion.WithKind("Namespace")) + 
ownerReference.BlockOwnerDeletion = pointer.BoolPtr(false) + } + if _, err := controllerutil.CreateOrUpdate(ctx, c, mutatingWebhookConfigurationSeed, func() error { + if ownerReference != nil { + mutatingWebhookConfigurationSeed.SetOwnerReferences(kubernetes.MergeOwnerReferences(mutatingWebhookConfigurationSeed.GetOwnerReferences(), *ownerReference)) + } mutatingWebhookConfigurationSeed.Webhooks = webhooksToRegisterSeed return nil }); err != nil { diff --git a/vendor/github.com/gardener/gardener/hack/hook-me.sh b/vendor/github.com/gardener/gardener/hack/hook-me.sh index 8506f7482..0ec645876 100755 --- a/vendor/github.com/gardener/gardener/hack/hook-me.sh +++ b/vendor/github.com/gardener/gardener/hack/hook-me.sh @@ -27,7 +27,7 @@ providerName=${2:-} [[ -z $providerName ]] && echo "Please specify the provider name (aws,gcp,azure,..etc.)!" && exit 1 tmpService=$(mktemp) -kubectl get svc gardener-extension-provider-$providerName -o yaml --export > $tmpService +kubectl get svc gardener-extension-provider-$providerName -o yaml > $tmpService cat < ./hack/hook-me.sh " - echo "> \`make EXTENSION_NAMESPACE= start\`" + echo "> ./hack/hook-me.sh " + echo "> \`make EXTENSION_NAMESPACE= WEBHOOK_CONFIG_MODE=service start\`" echo "==================================================================================================================================" echo "" @@ -294,7 +294,7 @@ if [[ "${BASH_SOURCE[0]}" = "$0" ]]; then createOrUpdateWebhookSVC $namespace $providerName echo "[STEP 7] Initializing the inlets client"; - echo "[Info] Inlets initialized, you are ready to go ahead and run \"make EXTENSION_NAMESPACE=$namespace start\"" + echo "[Info] Inlets initialized, you are ready to go ahead and run \"make EXTENSION_NAMESPACE=$namespace WEBHOOK_CONFIG_MODE=service start\"" echo "[Info] It will take about 5 seconds for the connection to succeeed!" 
inlets client --remote ws://$loadbalancerIPOrHostName:8000 --upstream https://localhost:$webhookServerPort --token=21d809ed61915c9177fbceeaa87e307e766be5f2 diff --git a/vendor/github.com/gardener/gardener/hack/test-cover.sh b/vendor/github.com/gardener/gardener/hack/test-cover.sh index a3e4f08bd..5262ef3c6 100755 --- a/vendor/github.com/gardener/gardener/hack/test-cover.sh +++ b/vendor/github.com/gardener/gardener/hack/test-cover.sh @@ -19,7 +19,7 @@ source "$(dirname $0)/setup-envtest.sh" echo "> Test Cover" -GO111MODULE=on ginkgo -cover -race -mod=vendor $@ +GO111MODULE=on ginkgo -cover -timeout=2m -race -mod=vendor $@ REPO_ROOT="$(git rev-parse --show-toplevel)" COVERPROFILE="$REPO_ROOT/test.coverprofile" diff --git a/vendor/github.com/gardener/gardener/pkg/api/extensions/utils.go b/vendor/github.com/gardener/gardener/pkg/api/extensions/utils.go index b9475f0f0..5d05c3fd9 100644 --- a/vendor/github.com/gardener/gardener/pkg/api/extensions/utils.go +++ b/vendor/github.com/gardener/gardener/pkg/api/extensions/utils.go @@ -19,16 +19,20 @@ import ( "k8s.io/apimachinery/pkg/runtime" ) -// GetShootNamespacedCRsLists returns an empty CR list struct, for each CR used for Shoot managment +// GetShootNamespacedCRsLists returns an empty CR list struct, for each CR used for Shoot management func GetShootNamespacedCRsLists() []runtime.Object { return []runtime.Object{ - &extensionsv1alpha1.ControlPlaneList{}, - &extensionsv1alpha1.ExtensionList{}, - &extensionsv1alpha1.InfrastructureList{}, + //The ControlPlane CR is now handled as a shoot component + //&extensionsv1alpha1.ControlPlaneList{}, + //The Extension CR is now handled as a shoot component + //&extensionsv1alpha1.ExtensionList{}, + //The Infrastructure CR is now handled as a shoot component + //&extensionsv1alpha1.InfrastructureList{}, //The Network CR is now handled as a shoot component //&extensionsv1alpha1.NetworkList{}, &extensionsv1alpha1.OperatingSystemConfigList{}, - &extensionsv1alpha1.WorkerList{}, + //The 
Worker CR is now handled as a shoot component + //&extensionsv1alpha1.WorkerList{}, //The ContainerRuntime CR is now handled as a shoot component //&extensionsv1alpha1.ContainerRuntimeList{}, } diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/types_project.go b/vendor/github.com/gardener/gardener/pkg/apis/core/types_project.go index 7e0ee7ad6..88063036c 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/types_project.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/types_project.go @@ -63,7 +63,7 @@ type ProjectSpec struct { // Namespace is the name of the namespace that has been created for the Project object. // A nil value means that Gardener will determine the name of the namespace. Namespace *string - // Tolerations contains the default tolerations and a whitelist for taints on seed clusters. + // Tolerations contains the default tolerations and a list for allowed taints on seed clusters. Tolerations *ProjectTolerations } diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/types_shoot.go b/vendor/github.com/gardener/gardener/pkg/apis/core/types_shoot.go index eb5ae1339..bf2b700db 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/types_shoot.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/types_shoot.go @@ -162,7 +162,7 @@ const ( // NginxIngress describes configuration values for the nginx-ingress addon. type NginxIngress struct { Addon - // LoadBalancerSourceRanges is list of whitelist IP sources for NginxIngress + // LoadBalancerSourceRanges is list of allowed IP sources for NginxIngress LoadBalancerSourceRanges []string // Config contains custom configuration for the nginx-ingress-controller configuration. 
// See https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/configmap.md#configuration-options diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/generated.proto b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/generated.proto index 822cfe2b6..c8bb879ef 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/generated.proto +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/generated.proto @@ -1288,7 +1288,7 @@ message Networking { message NginxIngress { optional Addon addon = 4; - // LoadBalancerSourceRanges is list of whitelist IP sources for NginxIngress + // LoadBalancerSourceRanges is list of allowed IP sources for NginxIngress // +optional repeated string loadBalancerSourceRanges = 1; diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_shoot.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_shoot.go index 012618974..72f4fbf92 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_shoot.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1alpha1/types_shoot.go @@ -193,7 +193,7 @@ const ( // NginxIngress describes configuration values for the nginx-ingress addon. type NginxIngress struct { Addon `json:",inline" protobuf:"bytes,4,opt,name=addon"` - // LoadBalancerSourceRanges is list of whitelist IP sources for NginxIngress + // LoadBalancerSourceRanges is list of allowed IP sources for NginxIngress // +optional LoadBalancerSourceRanges []string `json:"loadBalancerSourceRanges,omitempty" protobuf:"bytes,1,rep,name=loadBalancerSourceRanges"` // Config contains custom configuration for the nginx-ingress-controller configuration. 
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/generated.proto b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/generated.proto index 937df1ede..aebca8214 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/generated.proto +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/generated.proto @@ -1252,7 +1252,7 @@ message Networking { message NginxIngress { optional Addon addon = 1; - // LoadBalancerSourceRanges is list of whitelist IP sources for NginxIngress + // LoadBalancerSourceRanges is list of allowed IP sources for NginxIngress // +optional repeated string loadBalancerSourceRanges = 2; diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/helper/errors.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/helper/errors.go index 087ee5212..2694936a6 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/helper/errors.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/helper/errors.go @@ -54,8 +54,8 @@ var ( quotaExceededRegexp = regexp.MustCompile(`(?i)(LimitExceeded|Quota|Throttling|Too many requests)`) insufficientPrivilegesRegexp = regexp.MustCompile(`(?i)(AccessDenied|OperationNotAllowed|Error 403)`) dependenciesRegexp = regexp.MustCompile(`(?i)(PendingVerification|Access Not Configured|accessNotConfigured|DependencyViolation|OptInRequired|DeleteConflict|Conflict|inactive billing state|ReadOnlyDisabledSubscription|is already being used|InUseSubnetCannotBeDeleted|VnetInUse|InUseRouteTableCannotBeDeleted|timeout while waiting for state to become|InvalidCidrBlock|already busy for|InsufficientFreeAddressesInSubnet|InternalServerError|RetryableError|Future#WaitForCompletion: context has been cancelled|internalerror|internal server error|A resource with the ID|VnetAddressSpaceCannotChangeDueToPeerings)`) - resourcesDepletedRegexp = regexp.MustCompile(`(?i)(not available in the current hardware 
cluster|InsufficientInstanceCapacity|SkuNotAvailable|ZonalAllocationFailed)`) - configurationProblemRegexp = regexp.MustCompile(`(?i)(AzureBastionSubnet|not supported in your requested Availability Zone|InvalidParameter|InvalidParameterValue|not found|notFound|NetcfgInvalidSubnet|InvalidSubnet|Invalid value|KubeletHasInsufficientMemory|KubeletHasDiskPressure|KubeletHasInsufficientPID|violates constraint|no attached internet gateway found|Your query returned no results|PrivateEndpointNetworkPoliciesCannotBeEnabledOnPrivateEndpointSubnet|invalid VPC attributes|PrivateLinkServiceNetworkPoliciesCannotBeEnabledOnPrivateLinkServiceSubnet)`) + resourcesDepletedRegexp = regexp.MustCompile(`(?i)(not available in the current hardware cluster|InsufficientInstanceCapacity|SkuNotAvailable|ZonalAllocationFailed|out of stock)`) + configurationProblemRegexp = regexp.MustCompile(`(?i)(AzureBastionSubnet|not supported in your requested Availability Zone|InvalidParameter|InvalidParameterValue|not found|notFound|NetcfgInvalidSubnet|InvalidSubnet|Invalid value|KubeletHasInsufficientMemory|KubeletHasDiskPressure|KubeletHasInsufficientPID|violates constraint|no attached internet gateway found|Your query returned no results|PrivateEndpointNetworkPoliciesCannotBeEnabledOnPrivateEndpointSubnet|invalid VPC attributes|PrivateLinkServiceNetworkPoliciesCannotBeEnabledOnPrivateLinkServiceSubnet|unrecognized feature gate|runtime-config invalid key)`) ) // DetermineError determines the Garden error code for the given error and creates a new error with the given message. 
diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_shoot.go b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_shoot.go index 908cb06e7..6d797360f 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_shoot.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/v1beta1/types_shoot.go @@ -190,7 +190,7 @@ const ( // NginxIngress describes configuration values for the nginx-ingress addon. type NginxIngress struct { Addon `json:",inline" protobuf:"bytes,1,opt,name=addon"` - // LoadBalancerSourceRanges is list of whitelist IP sources for NginxIngress + // LoadBalancerSourceRanges is list of allowed IP sources for NginxIngress // +optional LoadBalancerSourceRanges []string `json:"loadBalancerSourceRanges,omitempty" protobuf:"bytes,2,rep,name=loadBalancerSourceRanges"` // Config contains custom configuration for the nginx-ingress-controller configuration. diff --git a/vendor/github.com/gardener/gardener/pkg/apis/core/validation/project.go b/vendor/github.com/gardener/gardener/pkg/apis/core/validation/project.go index 71cf2b210..758b9b01f 100644 --- a/vendor/github.com/gardener/gardener/pkg/apis/core/validation/project.go +++ b/vendor/github.com/gardener/gardener/pkg/apis/core/validation/project.go @@ -119,7 +119,7 @@ func ValidateProjectSpec(projectSpec *core.ProjectSpec, fldPath *field.Path) fie if projectSpec.Tolerations != nil { allErrs = append(allErrs, ValidateTolerations(projectSpec.Tolerations.Defaults, fldPath.Child("tolerations", "defaults"))...) allErrs = append(allErrs, ValidateTolerations(projectSpec.Tolerations.Whitelist, fldPath.Child("tolerations", "whitelist"))...) - allErrs = append(allErrs, ValidateTolerationsAgainstWhitelist(projectSpec.Tolerations.Defaults, projectSpec.Tolerations.Whitelist, fldPath.Child("tolerations", "defaults"))...) 
+ allErrs = append(allErrs, ValidateTolerationsAgainstAllowlist(projectSpec.Tolerations.Defaults, projectSpec.Tolerations.Whitelist, fldPath.Child("tolerations", "defaults"))...) } return allErrs @@ -228,21 +228,21 @@ func ValidateTolerations(tolerations []core.Toleration, fldPath *field.Path) fie return allErrs } -// ValidateTolerationsAgainstWhitelist validates the given tolerations against the given whitelist. -func ValidateTolerationsAgainstWhitelist(tolerations, whitelist []core.Toleration, fldPath *field.Path) field.ErrorList { +// ValidateTolerationsAgainstAllowlist validates the given tolerations against the given allowlist. +func ValidateTolerationsAgainstAllowlist(tolerations, allowlist []core.Toleration, fldPath *field.Path) field.ErrorList { var ( allErrs field.ErrorList allowedTolerations = sets.NewString() ) - for _, toleration := range whitelist { + for _, toleration := range allowlist { allowedTolerations.Insert(utils.IDForKeyWithOptionalValue(toleration.Key, toleration.Value)) } for i, toleration := range tolerations { id := utils.IDForKeyWithOptionalValue(toleration.Key, toleration.Value) if !allowedTolerations.Has(utils.IDForKeyWithOptionalValue(toleration.Key, nil)) && !allowedTolerations.Has(id) { - allErrs = append(allErrs, field.Forbidden(fldPath.Index(i), fmt.Sprintf("whitelist only allows using these tolerations: %+v", allowedTolerations.UnsortedList()))) + allErrs = append(allErrs, field.Forbidden(fldPath.Index(i), fmt.Sprintf("only the following tolerations are allowed: %+v", allowedTolerations.UnsortedList()))) } } diff --git a/vendor/github.com/gardener/gardener/pkg/features/features.go b/vendor/github.com/gardener/gardener/pkg/features/features.go index b88e60194..96f45c63b 100644 --- a/vendor/github.com/gardener/gardener/pkg/features/features.go +++ b/vendor/github.com/gardener/gardener/pkg/features/features.go @@ -83,4 +83,9 @@ const ( // owner: @stoyanr // alpha: v1.12.0 SeedChange featuregate.Feature = "SeedChange" + + // 
SeedKubeScheduler adds an additional kube-scheduler in seed clusters where the feature is enabled. + // owner: @mvladev + // alpha: v1.15.0 + SeedKubeScheduler featuregate.Feature = "SeedKubeScheduler" ) diff --git a/vendor/github.com/gardener/gardener/pkg/gardenlet/features/features.go b/vendor/github.com/gardener/gardener/pkg/gardenlet/features/features.go index f063679f4..faab26d96 100644 --- a/vendor/github.com/gardener/gardener/pkg/gardenlet/features/features.go +++ b/vendor/github.com/gardener/gardener/pkg/gardenlet/features/features.go @@ -34,6 +34,7 @@ var ( features.CachedRuntimeClients: {Default: false, PreRelease: featuregate.Alpha}, features.NodeLocalDNS: {Default: false, PreRelease: featuregate.Alpha}, features.MountHostCADirectories: {Default: false, PreRelease: featuregate.Alpha}, + features.SeedKubeScheduler: {Default: false, PreRelease: featuregate.Alpha}, } ) diff --git a/vendor/github.com/gardener/gardener/pkg/mock/gardener/extensions/terraformer/mocks.go b/vendor/github.com/gardener/gardener/pkg/mock/gardener/extensions/terraformer/mocks.go index df8687bbb..9840a3c47 100644 --- a/vendor/github.com/gardener/gardener/pkg/mock/gardener/extensions/terraformer/mocks.go +++ b/vendor/github.com/gardener/gardener/pkg/mock/gardener/extensions/terraformer/mocks.go @@ -13,7 +13,8 @@ import ( logr "github.com/go-logr/logr" gomock "github.com/golang/mock/gomock" v1 "k8s.io/api/core/v1" - v10 "k8s.io/client-go/kubernetes/typed/core/v1" + v10 "k8s.io/apimachinery/pkg/apis/meta/v1" + v11 "k8s.io/client-go/kubernetes/typed/core/v1" rest "k8s.io/client-go/rest" client "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -265,6 +266,20 @@ func (mr *MockTerraformerMockRecorder) SetLogLevel(arg0 interface{}) *gomock.Cal return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetLogLevel", reflect.TypeOf((*MockTerraformer)(nil).SetLogLevel), arg0) } +// SetOwnerRef mocks base method. 
+func (m *MockTerraformer) SetOwnerRef(arg0 *v10.OwnerReference) terraformer.Terraformer { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetOwnerRef", arg0) + ret0, _ := ret[0].(terraformer.Terraformer) + return ret0 +} + +// SetOwnerRef indicates an expected call of SetOwnerRef. +func (mr *MockTerraformerMockRecorder) SetOwnerRef(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetOwnerRef", reflect.TypeOf((*MockTerraformer)(nil).SetOwnerRef), arg0) +} + // SetTerminationGracePeriodSeconds mocks base method. func (m *MockTerraformer) SetTerminationGracePeriodSeconds(arg0 int64) terraformer.Terraformer { m.ctrl.T.Helper() @@ -331,17 +346,17 @@ func (m *MockInitializer) EXPECT() *MockInitializerMockRecorder { } // Initialize mocks base method. -func (m *MockInitializer) Initialize(arg0 context.Context, arg1 *terraformer.InitializerConfig) error { +func (m *MockInitializer) Initialize(arg0 context.Context, arg1 *terraformer.InitializerConfig, arg2 *v10.OwnerReference) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Initialize", arg0, arg1) + ret := m.ctrl.Call(m, "Initialize", arg0, arg1, arg2) ret0, _ := ret[0].(error) return ret0 } // Initialize indicates an expected call of Initialize. -func (mr *MockInitializerMockRecorder) Initialize(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockInitializerMockRecorder) Initialize(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Initialize", reflect.TypeOf((*MockInitializer)(nil).Initialize), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Initialize", reflect.TypeOf((*MockInitializer)(nil).Initialize), arg0, arg1, arg2) } // MockFactory is a mock of Factory interface. @@ -382,7 +397,7 @@ func (mr *MockFactoryMockRecorder) DefaultInitializer(arg0, arg1, arg2, arg3, ar } // New mocks base method. 
-func (m *MockFactory) New(arg0 logr.Logger, arg1 client.Client, arg2 v10.CoreV1Interface, arg3, arg4, arg5, arg6 string) terraformer.Terraformer { +func (m *MockFactory) New(arg0 logr.Logger, arg1 client.Client, arg2 v11.CoreV1Interface, arg3, arg4, arg5, arg6 string) terraformer.Terraformer { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "New", arg0, arg1, arg2, arg3, arg4, arg5, arg6) ret0, _ := ret[0].(terraformer.Terraformer) diff --git a/vendor/github.com/gardener/gardener/pkg/operation/botanist/addons.go b/vendor/github.com/gardener/gardener/pkg/operation/botanist/addons.go index 77af9d2c9..b492f1b75 100644 --- a/vendor/github.com/gardener/gardener/pkg/operation/botanist/addons.go +++ b/vendor/github.com/gardener/gardener/pkg/operation/botanist/addons.go @@ -196,11 +196,11 @@ func (b *Botanist) GenerateNginxIngressConfig() (map[string]interface{}, error) // DeployManagedResources deploys all the ManagedResource CRDs for the gardener-resource-manager. func (b *Botanist) DeployManagedResources(ctx context.Context) error { - for name, chartRenderFunc := range map[string]func() (*chartrenderer.RenderedChart, error){ + for name, chartRenderFunc := range map[string]func(context.Context) (*chartrenderer.RenderedChart, error){ common.ManagedResourceShootCoreName: b.generateCoreAddonsChart, common.ManagedResourceAddonsName: b.generateOptionalAddonsChart, } { - renderedChart, err := chartRenderFunc() + renderedChart, err := chartRenderFunc(ctx) if err != nil { return fmt.Errorf("error rendering %q chart: %+v", name, err) } @@ -300,7 +300,7 @@ func (b *Botanist) deleteStaleSecretsMatchLabel(ctx context.Context, labels map[ // generateCoreAddonsChart renders the gardener-resource-manager configuration for the core addons. After that it // creates a ManagedResource CRD that references the rendered manifests and creates it. 
-func (b *Botanist) generateCoreAddonsChart() (*chartrenderer.RenderedChart, error) { +func (b *Botanist) generateCoreAddonsChart(ctx context.Context) (*chartrenderer.RenderedChart, error) { var ( kasFQDN = b.outOfClusterAPIServerFQDN() kubeProxySecret = b.Secrets["kube-proxy"] @@ -498,13 +498,13 @@ func (b *Botanist) generateCoreAddonsChart() (*chartrenderer.RenderedChart, erro values["konnectivity-agent"] = common.GenerateAddonConfig(konnectivityAgent, true) // TODO: remove when konnectivity tunnel is the default tunneling method for all shoots. - secret, err := common.GetSecretFromSecretRef(context.TODO(), shootClient, &corev1.SecretReference{Namespace: metav1.NamespaceSystem, Name: "vpn-shoot"}) + secret, err := common.GetSecretFromSecretRef(ctx, shootClient, &corev1.SecretReference{Namespace: metav1.NamespaceSystem, Name: "vpn-shoot"}) if err != nil && !apierrors.IsNotFound(err) { return nil, err } if secret != nil { - if err := b.K8sShootClient.Client().Delete(context.TODO(), secret); err != nil { + if err := b.K8sShootClient.Client().Delete(ctx, secret); err != nil { return nil, err } } @@ -549,7 +549,7 @@ func (b *Botanist) generateCoreAddonsChart() (*chartrenderer.RenderedChart, erro // generateOptionalAddonsChart renders the gardener-resource-manager chart for the optional addons. After that it // creates a ManagedResource CRD that references the rendered manifests and creates it. 
-func (b *Botanist) generateOptionalAddonsChart() (*chartrenderer.RenderedChart, error) { +func (b *Botanist) generateOptionalAddonsChart(_ context.Context) (*chartrenderer.RenderedChart, error) { global := map[string]interface{}{ "vpaEnabled": b.Shoot.WantsVerticalPodAutoscaler, } diff --git a/vendor/github.com/gardener/gardener/pkg/operation/botanist/botanist.go b/vendor/github.com/gardener/gardener/pkg/operation/botanist/botanist.go index 133743e67..f8dd9e119 100644 --- a/vendor/github.com/gardener/gardener/pkg/operation/botanist/botanist.go +++ b/vendor/github.com/gardener/gardener/pkg/operation/botanist/botanist.go @@ -46,11 +46,10 @@ const ( // New takes an operation object and creates a new Botanist object. It checks whether the given Shoot DNS // domain is covered by a default domain, and if so, it sets the attribute on the Botanist // object. -func New(o *operation.Operation) (*Botanist, error) { +func New(ctx context.Context, o *operation.Operation) (*Botanist, error) { var ( b = &Botanist{Operation: o} err error - ctx = context.TODO() ) // Determine all default domain secrets and check whether the used Shoot domain matches a default domain. 
@@ -69,11 +68,12 @@ func New(o *operation.Operation) (*Botanist, error) { } } - if err = b.InitializeSeedClients(); err != nil { + if err = b.InitializeSeedClients(ctx); err != nil { return nil, err } // extension components + o.Shoot.Components.Extensions.ContainerRuntime = b.DefaultContainerRuntime(b.K8sSeedClient.DirectClient()) o.Shoot.Components.Extensions.ControlPlane = b.DefaultControlPlane(b.K8sSeedClient.DirectClient(), extensionsv1alpha1.Normal) o.Shoot.Components.Extensions.ControlPlaneExposure = b.DefaultControlPlane(b.K8sSeedClient.DirectClient(), extensionsv1alpha1.Exposure) o.Shoot.Components.Extensions.DNS.ExternalProvider = b.DefaultExternalDNSProvider(b.K8sSeedClient.DirectClient()) @@ -88,9 +88,10 @@ func New(o *operation.Operation) (*Botanist, error) { if err != nil { return nil, err } + o.Shoot.Components.Extensions.Extension = b.DefaultExtension(b.K8sSeedClient.DirectClient()) o.Shoot.Components.Extensions.Infrastructure = b.DefaultInfrastructure(b.K8sSeedClient.DirectClient()) o.Shoot.Components.Extensions.Network = b.DefaultNetwork(b.K8sSeedClient.DirectClient()) - o.Shoot.Components.Extensions.ContainerRuntime = b.DefaultContainerRuntime(b.K8sSeedClient.DirectClient()) + o.Shoot.Components.Extensions.Worker = b.DefaultWorker(b.K8sSeedClient.DirectClient()) sniPhase, err := b.SNIPhase(ctx) if err != nil { diff --git a/vendor/github.com/gardener/gardener/pkg/operation/botanist/clusterautoscaler.go b/vendor/github.com/gardener/gardener/pkg/operation/botanist/clusterautoscaler.go index bec413e8b..87ab20763 100644 --- a/vendor/github.com/gardener/gardener/pkg/operation/botanist/clusterautoscaler.go +++ b/vendor/github.com/gardener/gardener/pkg/operation/botanist/clusterautoscaler.go @@ -46,7 +46,7 @@ func (b *Botanist) DeployClusterAutoscaler(ctx context.Context) error { Kubeconfig: component.Secret{Name: clusterautoscaler.SecretName, Checksum: b.CheckSums[clusterautoscaler.SecretName]}, }) 
b.Shoot.Components.ControlPlane.ClusterAutoscaler.SetNamespaceUID(b.SeedNamespaceObject.UID) - b.Shoot.Components.ControlPlane.ClusterAutoscaler.SetMachineDeployments(b.Shoot.MachineDeployments) + b.Shoot.Components.ControlPlane.ClusterAutoscaler.SetMachineDeployments(b.Shoot.Components.Extensions.Worker.MachineDeployments()) return b.Shoot.Components.ControlPlane.ClusterAutoscaler.Deploy(ctx) } diff --git a/vendor/github.com/gardener/gardener/pkg/operation/botanist/controlplane.go b/vendor/github.com/gardener/gardener/pkg/operation/botanist/controlplane.go index 6f154ff1f..9c47da6c6 100644 --- a/vendor/github.com/gardener/gardener/pkg/operation/botanist/controlplane.go +++ b/vendor/github.com/gardener/gardener/pkg/operation/botanist/controlplane.go @@ -166,8 +166,7 @@ func (b *Botanist) DeployVerticalPodAutoscaler(ctx context.Context) error { "recommender": recommender, "updater": updater, "deploymentLabels": map[string]interface{}{ - v1beta1constants.GardenRole: v1beta1constants.GardenRoleControlPlane, - v1beta1constants.DeprecatedGardenRole: v1beta1constants.GardenRoleControlPlane, + v1beta1constants.GardenRole: v1beta1constants.GardenRoleControlPlane, }, "clusterType": "shoot", } @@ -699,7 +698,7 @@ func (b *Botanist) DeployKubeAPIServer(ctx context.Context) error { foundDeployment := true deployment := &appsv1.Deployment{} - if err := b.K8sSeedClient.Client().Get(context.TODO(), kutil.Key(b.Shoot.SeedNamespace, v1beta1constants.DeploymentNameKubeAPIServer), deployment); err != nil && !apierrors.IsNotFound(err) { + if err := b.K8sSeedClient.Client().Get(ctx, kutil.Key(b.Shoot.SeedNamespace, v1beta1constants.DeploymentNameKubeAPIServer), deployment); err != nil && !apierrors.IsNotFound(err) { return err } else if apierrors.IsNotFound(err) { foundDeployment = false @@ -795,7 +794,7 @@ func (b *Botanist) DeployKubeAPIServer(ctx context.Context) error { } if signingKeySecret := serviceAccountConfig.SigningKeySecret; signingKeySecret != nil { - signingKey, err := 
common.GetServiceAccountSigningKeySecret(context.TODO(), b.K8sGardenClient.Client(), b.Shoot.Info.Namespace, signingKeySecret.Name) + signingKey, err := common.GetServiceAccountSigningKeySecret(ctx, b.K8sGardenClient.Client(), b.Shoot.Info.Namespace, signingKeySecret.Name) if err != nil { return err } @@ -828,7 +827,7 @@ func (b *Botanist) DeployKubeAPIServer(ctx context.Context) error { apiServerConfig.AuditConfig.AuditPolicy != nil && apiServerConfig.AuditConfig.AuditPolicy.ConfigMapRef != nil { - auditPolicy, err := b.getAuditPolicy(apiServerConfig.AuditConfig.AuditPolicy.ConfigMapRef.Name, b.Shoot.Info.Namespace) + auditPolicy, err := b.getAuditPolicy(ctx, apiServerConfig.AuditConfig.AuditPolicy.ConfigMapRef.Name, b.Shoot.Info.Namespace) if err != nil { // Ignore missing audit configuration on shoot deletion to prevent failing redeployments of the // kube-apiserver in case the end-user deleted the configmap before/simultaneously to the shoot @@ -920,9 +919,9 @@ func (b *Botanist) DeployKubeAPIServer(ctx context.Context) error { return b.K8sSeedClient.ChartApplier().Apply(ctx, filepath.Join(chartPathControlPlane, v1beta1constants.DeploymentNameKubeAPIServer), b.Shoot.SeedNamespace, v1beta1constants.DeploymentNameKubeAPIServer, kubernetes.Values(values)) } -func (b *Botanist) getAuditPolicy(name, namespace string) (string, error) { +func (b *Botanist) getAuditPolicy(ctx context.Context, name, namespace string) (string, error) { auditPolicyCm := &corev1.ConfigMap{} - if err := b.K8sGardenClient.Client().Get(context.TODO(), kutil.Key(namespace, name), auditPolicyCm); err != nil { + if err := b.K8sGardenClient.Client().Get(ctx, kutil.Key(namespace, name), auditPolicyCm); err != nil { return "", err } auditPolicy, ok := auditPolicyCm.Data[auditPolicyConfigMapDataKey] diff --git a/vendor/github.com/gardener/gardener/pkg/operation/botanist/controlplane/clusterautoscaler/cluster_autoscaler.go 
b/vendor/github.com/gardener/gardener/pkg/operation/botanist/controlplane/clusterautoscaler/cluster_autoscaler.go index 5b0eb882b..9597a9a41 100644 --- a/vendor/github.com/gardener/gardener/pkg/operation/botanist/controlplane/clusterautoscaler/cluster_autoscaler.go +++ b/vendor/github.com/gardener/gardener/pkg/operation/botanist/controlplane/clusterautoscaler/cluster_autoscaler.go @@ -166,8 +166,7 @@ func (c *clusterAutoscaler) Deploy(ctx context.Context) error { if _, err := controllerutil.CreateOrUpdate(ctx, c.client, deployment, func() error { deployment.Labels = utils.MergeStringMaps(getLabels(), map[string]string{ - v1beta1constants.GardenRole: v1beta1constants.GardenRoleControlPlane, - v1beta1constants.DeprecatedGardenRole: v1beta1constants.GardenRoleControlPlane, + v1beta1constants.GardenRole: v1beta1constants.GardenRoleControlPlane, }) deployment.Spec.Replicas = &c.replicas deployment.Spec.RevisionHistoryLimit = pointer.Int32Ptr(0) diff --git a/vendor/github.com/gardener/gardener/pkg/operation/botanist/controlplane/etcd/etcd.go b/vendor/github.com/gardener/gardener/pkg/operation/botanist/controlplane/etcd/etcd.go index 62966e967..81a648e9e 100644 --- a/vendor/github.com/gardener/gardener/pkg/operation/botanist/controlplane/etcd/etcd.go +++ b/vendor/github.com/gardener/gardener/pkg/operation/botanist/controlplane/etcd/etcd.go @@ -280,9 +280,8 @@ func (e *etcd) Deploy(ctx context.Context) error { v1beta1constants.GardenerTimestamp: TimeNow().UTC().String(), } etcd.Labels = map[string]string{ - v1beta1constants.LabelRole: e.role, - v1beta1constants.GardenRole: v1beta1constants.GardenRoleControlPlane, - v1beta1constants.DeprecatedGardenRole: v1beta1constants.GardenRoleControlPlane, + v1beta1constants.LabelRole: e.role, + v1beta1constants.GardenRole: v1beta1constants.GardenRoleControlPlane, } etcd.Spec.Replicas = replicas etcd.Spec.PriorityClassName = pointer.StringPtr(v1beta1constants.PriorityClassNameShootControlPlane) diff --git 
a/vendor/github.com/gardener/gardener/pkg/operation/botanist/controlplane/etcd/waiter.go b/vendor/github.com/gardener/gardener/pkg/operation/botanist/controlplane/etcd/waiter.go index dcea505c9..27da38684 100644 --- a/vendor/github.com/gardener/gardener/pkg/operation/botanist/controlplane/etcd/waiter.go +++ b/vendor/github.com/gardener/gardener/pkg/operation/botanist/controlplane/etcd/waiter.go @@ -54,7 +54,7 @@ func WaitUntilEtcdsReady( ctx, etcdList, client.InNamespace(namespace), - client.MatchingLabels{v1beta1constants.DeprecatedGardenRole: v1beta1constants.GardenRoleControlPlane}, + client.MatchingLabels{v1beta1constants.GardenRole: v1beta1constants.GardenRoleControlPlane}, ); err != nil { return retry.SevereError(err) } diff --git a/vendor/github.com/gardener/gardener/pkg/operation/botanist/controlplane/kubecontrollermanager/kube_controller_manager.go b/vendor/github.com/gardener/gardener/pkg/operation/botanist/controlplane/kubecontrollermanager/kube_controller_manager.go index ac8596442..ee425bf80 100644 --- a/vendor/github.com/gardener/gardener/pkg/operation/botanist/controlplane/kubecontrollermanager/kube_controller_manager.go +++ b/vendor/github.com/gardener/gardener/pkg/operation/botanist/controlplane/kubecontrollermanager/kube_controller_manager.go @@ -169,8 +169,7 @@ func (k *kubeControllerManager) Deploy(ctx context.Context) error { if _, err := controllerutil.CreateOrUpdate(ctx, k.seedClient, deployment, func() error { deployment.Labels = utils.MergeStringMaps(getLabels(), map[string]string{ - v1beta1constants.GardenRole: v1beta1constants.GardenRoleControlPlane, - v1beta1constants.DeprecatedGardenRole: v1beta1constants.GardenRoleControlPlane, + v1beta1constants.GardenRole: v1beta1constants.GardenRoleControlPlane, }) deployment.Spec.Replicas = &k.replicas deployment.Spec.RevisionHistoryLimit = pointer.Int32Ptr(0) diff --git a/vendor/github.com/gardener/gardener/pkg/operation/botanist/controlplane/kubescheduler/kube_scheduler.go 
b/vendor/github.com/gardener/gardener/pkg/operation/botanist/controlplane/kubescheduler/kube_scheduler.go index 635495626..47b0aedea 100644 --- a/vendor/github.com/gardener/gardener/pkg/operation/botanist/controlplane/kubescheduler/kube_scheduler.go +++ b/vendor/github.com/gardener/gardener/pkg/operation/botanist/controlplane/kubescheduler/kube_scheduler.go @@ -165,8 +165,7 @@ func (k *kubeScheduler) Deploy(ctx context.Context) error { if _, err := controllerutil.CreateOrUpdate(ctx, k.client, deployment, func() error { deployment.Labels = utils.MergeStringMaps(getLabels(), map[string]string{ - v1beta1constants.GardenRole: v1beta1constants.GardenRoleControlPlane, - v1beta1constants.DeprecatedGardenRole: v1beta1constants.GardenRoleControlPlane, + v1beta1constants.GardenRole: v1beta1constants.GardenRoleControlPlane, }) deployment.Spec.Replicas = &k.replicas deployment.Spec.RevisionHistoryLimit = pointer.Int32Ptr(0) diff --git a/vendor/github.com/gardener/gardener/pkg/operation/botanist/extension.go b/vendor/github.com/gardener/gardener/pkg/operation/botanist/extension.go index 2b2ef2c27..086064b49 100644 --- a/vendor/github.com/gardener/gardener/pkg/operation/botanist/extension.go +++ b/vendor/github.com/gardener/gardener/pkg/operation/botanist/extension.go @@ -1,4 +1,4 @@ -// Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -16,130 +16,32 @@ package botanist import ( "context" - "time" - v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants" - extensionsv1alpha1 "github.com/gardener/gardener/pkg/apis/extensions/v1alpha1" - "github.com/gardener/gardener/pkg/operation/common" - "github.com/gardener/gardener/pkg/operation/shoot" - "github.com/gardener/gardener/pkg/utils/flow" + "github.com/gardener/gardener/pkg/operation/botanist/extensions/extension" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/sets" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/client" ) -// DeployExtensionResources creates the `Extension` extension resource in the shoot namespace in the seed -// cluster. Gardener waits until an external controller did reconcile the cluster successfully. -func (b *Botanist) DeployExtensionResources(ctx context.Context) error { - var ( - restorePhase = b.isRestorePhase() - gardenerOperation = v1beta1constants.GardenerOperationReconcile - ) - - if restorePhase { - gardenerOperation = v1beta1constants.GardenerOperationWaitForState - } - - fns := make([]flow.TaskFn, 0, len(b.Shoot.Extensions)) - for _, extension := range b.Shoot.Extensions { - var ( - extensionType = extension.Spec.Type - providerConfig = extension.Spec.ProviderConfig - toApply = extensionsv1alpha1.Extension{ - ObjectMeta: metav1.ObjectMeta{ - Name: extension.Name, - Namespace: extension.Namespace, - }, - } - ) - - fns = append(fns, func(ctx context.Context) error { - _, err := controllerutil.CreateOrUpdate(ctx, b.K8sSeedClient.Client(), &toApply, func() error { - metav1.SetMetaDataAnnotation(&toApply.ObjectMeta, v1beta1constants.GardenerOperation, gardenerOperation) - metav1.SetMetaDataAnnotation(&toApply.ObjectMeta, v1beta1constants.GardenerTimestamp, time.Now().UTC().String()) - toApply.Spec.Type = extensionType - toApply.Spec.ProviderConfig = providerConfig - return 
nil - }) - - if restorePhase { - return b.restoreExtensionObject(ctx, &toApply, extensionsv1alpha1.ExtensionResource) - } - - return err - }) - } - - return flow.Parallel(fns...)(ctx) -} - -// WaitUntilExtensionResourcesReady waits until all extension resources report `Succeeded` in their last operation state. -// The state must be reported before the passed context is cancelled or an extension's timeout has been reached. -// As soon as one timeout has been overstepped the function returns an error, further waits on extensions will be aborted. -func (b *Botanist) WaitUntilExtensionResourcesReady(ctx context.Context) error { - fns := make([]flow.TaskFn, 0, len(b.Shoot.Extensions)) - for _, extension := range b.Shoot.Extensions { - fns = append(fns, func(ctx context.Context) error { - return common.WaitUntilExtensionCRReady( - ctx, - b.K8sSeedClient.DirectClient(), - b.Logger, - func() runtime.Object { return &extensionsv1alpha1.Extension{} }, - "Extension", - extension.Namespace, - extension.Name, - DefaultInterval, - DefaultSevereThreshold, - extension.Timeout, - nil, - ) - }) - } - - return flow.ParallelExitOnError(fns...)(ctx) -} - -// DeleteStaleExtensionResources deletes unused extensions from the shoot namespace in the seed. -func (b *Botanist) DeleteStaleExtensionResources(ctx context.Context) error { - wantedExtensionTypes := sets.NewString() - for _, extension := range b.Shoot.Extensions { - wantedExtensionTypes.Insert(extension.Spec.Type) - } - return b.deleteExtensionResources(ctx, wantedExtensionTypes) -} - -// DeleteAllExtensionResources deletes all extension resources from the Shoot namespace in the Seed. 
-func (b *Botanist) DeleteAllExtensionResources(ctx context.Context) error { - return b.deleteExtensionResources(ctx, sets.NewString()) -} - -func (b *Botanist) deleteExtensionResources(ctx context.Context, wantedExtensionTypes sets.String) error { - return common.DeleteExtensionCRs( - ctx, - b.K8sSeedClient.Client(), - &extensionsv1alpha1.ExtensionList{}, - func() extensionsv1alpha1.Object { return &extensionsv1alpha1.Extension{} }, - b.Shoot.SeedNamespace, - func(obj extensionsv1alpha1.Object) bool { - return !wantedExtensionTypes.Has(obj.GetExtensionSpec().GetExtensionType()) +// DefaultExtension creates the default deployer for the Extension custom resources. +func (b *Botanist) DefaultExtension(seedClient client.Client) extension.Interface { + return extension.New( + b.Logger, + seedClient, + &extension.Values{ + Namespace: b.Shoot.SeedNamespace, + Extensions: b.Shoot.Extensions, }, + extension.DefaultInterval, + extension.DefaultSevereThreshold, + extension.DefaultTimeout, ) } -// WaitUntilExtensionResourcesDeleted waits until all extension resources are gone or the context is cancelled. -func (b *Botanist) WaitUntilExtensionResourcesDeleted(ctx context.Context) error { - return common.WaitUntilExtensionCRsDeleted( - ctx, - b.K8sSeedClient.DirectClient(), - b.Logger, - &extensionsv1alpha1.ExtensionList{}, - func() extensionsv1alpha1.Object { return &extensionsv1alpha1.Extension{} }, - "Extension", - b.Shoot.SeedNamespace, - DefaultInterval, - shoot.ExtensionDefaultTimeout, - nil, - ) +// DeployExtensions deploys the Extension custom resources and triggers the restore operation in case +// the Shoot is in the restore phase of the control plane migration. 
+func (b *Botanist) DeployExtensions(ctx context.Context) error { + if b.isRestorePhase() { + return b.Shoot.Components.Extensions.Extension.Restore(ctx, b.ShootState) + } + return b.Shoot.Components.Extensions.Extension.Deploy(ctx) } diff --git a/vendor/github.com/gardener/gardener/pkg/operation/botanist/extensions/controlplane/controlplane.go b/vendor/github.com/gardener/gardener/pkg/operation/botanist/extensions/controlplane/controlplane.go index 4c489b6a6..d29eb9076 100644 --- a/vendor/github.com/gardener/gardener/pkg/operation/botanist/extensions/controlplane/controlplane.go +++ b/vendor/github.com/gardener/gardener/pkg/operation/botanist/extensions/controlplane/controlplane.go @@ -96,50 +96,50 @@ type controlPlane struct { providerStatus *runtime.RawExtension } -func (i *controlPlane) name() string { - if i.values.Purpose == extensionsv1alpha1.Exposure { - return i.values.Name + "-exposure" +func (c *controlPlane) name() string { + if c.values.Purpose == extensionsv1alpha1.Exposure { + return c.values.Name + "-exposure" } - return i.values.Name + return c.values.Name } // Deploy uses the seed client to create or update the ControlPlane resource. 
-func (i *controlPlane) Deploy(ctx context.Context) error { - _, err := i.deploy(ctx, v1beta1constants.GardenerOperationReconcile) +func (c *controlPlane) Deploy(ctx context.Context) error { + _, err := c.deploy(ctx, v1beta1constants.GardenerOperationReconcile) return err } -func (i *controlPlane) deploy(ctx context.Context, operation string) (extensionsv1alpha1.Object, error) { +func (c *controlPlane) deploy(ctx context.Context, operation string) (extensionsv1alpha1.Object, error) { var ( controlPlane = &extensionsv1alpha1.ControlPlane{ ObjectMeta: metav1.ObjectMeta{ - Name: i.name(), - Namespace: i.values.Namespace, + Name: c.name(), + Namespace: c.values.Namespace, }, } providerConfig *runtime.RawExtension ) - if cfg := i.values.ProviderConfig; cfg != nil { + if cfg := c.values.ProviderConfig; cfg != nil { providerConfig = &runtime.RawExtension{Raw: cfg.Raw} } - _, err := controllerutil.CreateOrUpdate(ctx, i.client, controlPlane, func() error { + _, err := controllerutil.CreateOrUpdate(ctx, c.client, controlPlane, func() error { metav1.SetMetaDataAnnotation(&controlPlane.ObjectMeta, v1beta1constants.GardenerOperation, operation) metav1.SetMetaDataAnnotation(&controlPlane.ObjectMeta, v1beta1constants.GardenerTimestamp, TimeNow().UTC().String()) controlPlane.Spec = extensionsv1alpha1.ControlPlaneSpec{ DefaultSpec: extensionsv1alpha1.DefaultSpec{ - Type: i.values.Type, + Type: c.values.Type, ProviderConfig: providerConfig, }, - Region: i.values.Region, - Purpose: &i.values.Purpose, + Region: c.values.Region, + Purpose: &c.values.Purpose, SecretRef: corev1.SecretReference{ Name: v1beta1constants.SecretNameCloudProvider, Namespace: controlPlane.Namespace, }, - InfrastructureProviderStatus: i.values.InfrastructureProviderStatus, + InfrastructureProviderStatus: c.values.InfrastructureProviderStatus, } return nil @@ -149,98 +149,98 @@ func (i *controlPlane) deploy(ctx context.Context, operation string) (extensions } // Restore uses the seed client and the ShootState to 
create the ControlPlane resources and restore their state. -func (i *controlPlane) Restore(ctx context.Context, shootState *gardencorev1alpha1.ShootState) error { +func (c *controlPlane) Restore(ctx context.Context, shootState *gardencorev1alpha1.ShootState) error { return common.RestoreExtensionWithDeployFunction( ctx, shootState, - i.client, + c.client, extensionsv1alpha1.ControlPlaneResource, - i.values.Namespace, - i.deploy, + c.values.Namespace, + c.deploy, ) } // Migrate migrates the ControlPlane resources. -func (i *controlPlane) Migrate(ctx context.Context) error { +func (c *controlPlane) Migrate(ctx context.Context) error { return common.MigrateExtensionCRs( ctx, - i.client, + c.client, &extensionsv1alpha1.ControlPlaneList{}, func() extensionsv1alpha1.Object { return &extensionsv1alpha1.ControlPlane{} }, - i.values.Namespace, + c.values.Namespace, ) } // Destroy deletes the ControlPlane resource. -func (i *controlPlane) Destroy(ctx context.Context) error { +func (c *controlPlane) Destroy(ctx context.Context) error { return common.DeleteExtensionCR( ctx, - i.client, + c.client, func() extensionsv1alpha1.Object { return &extensionsv1alpha1.ControlPlane{} }, - i.values.Namespace, - i.name(), + c.values.Namespace, + c.name(), ) } // Wait waits until the ControlPlane resource is ready. 
-func (i *controlPlane) Wait(ctx context.Context) error { +func (c *controlPlane) Wait(ctx context.Context) error { return common.WaitUntilExtensionCRReady( ctx, - i.client, - i.logger, + c.client, + c.logger, func() runtime.Object { return &extensionsv1alpha1.ControlPlane{} }, extensionsv1alpha1.ControlPlaneResource, - i.values.Namespace, - i.name(), - i.waitInterval, - i.waitSevereThreshold, - i.waitTimeout, + c.values.Namespace, + c.name(), + c.waitInterval, + c.waitSevereThreshold, + c.waitTimeout, func(obj runtime.Object) error { controlPlane, ok := obj.(*extensionsv1alpha1.ControlPlane) if !ok { return fmt.Errorf("expected extensionsv1alpha1.ControlPlane but got %T", controlPlane) } - i.providerStatus = controlPlane.Status.ProviderStatus + c.providerStatus = controlPlane.Status.ProviderStatus return nil }, ) } // WaitMigrate waits until the ControlPlane resources are migrated successfully. -func (i *controlPlane) WaitMigrate(ctx context.Context) error { +func (c *controlPlane) WaitMigrate(ctx context.Context) error { return common.WaitUntilExtensionCRMigrated( ctx, - i.client, + c.client, func() extensionsv1alpha1.Object { return &extensionsv1alpha1.ControlPlane{} }, - i.values.Namespace, - i.name(), - i.waitInterval, - i.waitTimeout, + c.values.Namespace, + c.name(), + c.waitInterval, + c.waitTimeout, ) } // WaitCleanup waits until the ControlPlane resource is deleted. -func (i *controlPlane) WaitCleanup(ctx context.Context) error { +func (c *controlPlane) WaitCleanup(ctx context.Context) error { return common.WaitUntilExtensionCRDeleted( ctx, - i.client, - i.logger, + c.client, + c.logger, func() extensionsv1alpha1.Object { return &extensionsv1alpha1.ControlPlane{} }, extensionsv1alpha1.ControlPlaneResource, - i.values.Namespace, - i.name(), - i.waitInterval, - i.waitTimeout, + c.values.Namespace, + c.name(), + c.waitInterval, + c.waitTimeout, ) } // SetInfrastructureProviderStatus sets the infrastructure provider status in the values. 
-func (i *controlPlane) SetInfrastructureProviderStatus(status *runtime.RawExtension) { - i.values.InfrastructureProviderStatus = status +func (c *controlPlane) SetInfrastructureProviderStatus(status *runtime.RawExtension) { + c.values.InfrastructureProviderStatus = status } // ProviderStatus returns the generated status of the provider. -func (i *controlPlane) ProviderStatus() *runtime.RawExtension { - return i.providerStatus +func (c *controlPlane) ProviderStatus() *runtime.RawExtension { + return c.providerStatus } diff --git a/vendor/github.com/gardener/gardener/pkg/operation/botanist/extensions/extension/extension.go b/vendor/github.com/gardener/gardener/pkg/operation/botanist/extensions/extension/extension.go new file mode 100644 index 000000000..0918a2649 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/operation/botanist/extensions/extension/extension.go @@ -0,0 +1,250 @@ +// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package extension + +import ( + "context" + "time" + + gardencorev1alpha1 "github.com/gardener/gardener/pkg/apis/core/v1alpha1" + v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants" + extensionsv1alpha1 "github.com/gardener/gardener/pkg/apis/extensions/v1alpha1" + "github.com/gardener/gardener/pkg/operation/botanist/component" + "github.com/gardener/gardener/pkg/operation/common" + "github.com/gardener/gardener/pkg/utils/flow" + + "github.com/sirupsen/logrus" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +const ( + // DefaultInterval is the default interval for retry operations. + DefaultInterval = 5 * time.Second + // DefaultSevereThreshold is the default threshold until an error reported by another component is treated as 'severe'. + DefaultSevereThreshold = 30 * time.Second + // DefaultTimeout is the default timeout and defines how long Gardener should wait + // for a successful reconciliation of an Extension resource. + DefaultTimeout = 3 * time.Minute +) + +// TimeNow returns the current time. Exposed for testing. +var TimeNow = time.Now + +// Interface contains references to an Extension deployer. +type Interface interface { + component.DeployMigrateWaiter + DeleteStaleResources(ctx context.Context) error +} + +// Extension contains information about the desired Extension resources as well as configuration information. +type Extension struct { + extensionsv1alpha1.Extension + Timeout time.Duration +} + +// Values contains the values used to create an Extension resources. 
+type Values struct { + Namespace string + Extensions map[string]Extension +} + +type extension struct { + values *Values + client client.Client + logger logrus.FieldLogger + waitInterval time.Duration + waitSevereThreshold time.Duration + waitTimeout time.Duration +} + +// New creates a new instance of Extension deployer. +func New( + logger logrus.FieldLogger, + client client.Client, + values *Values, + waitInterval time.Duration, + waitSevereThreshold time.Duration, + waitTimeout time.Duration, +) Interface { + return &extension{ + values: values, + client: client, + logger: logger, + waitInterval: waitInterval, + waitSevereThreshold: waitSevereThreshold, + waitTimeout: waitTimeout, + } +} + +// Deploy uses the seed client to create or update the Extension resources. +func (e *extension) Deploy(ctx context.Context) error { + fns := e.forEach(func(ctx context.Context, extension extensionsv1alpha1.Extension, _ time.Duration) error { + deployer := &deployer{e.client, extension} + + _, err := deployer.deploy(ctx, v1beta1constants.GardenerOperationReconcile) + return err + }) + + return flow.Parallel(fns...)(ctx) +} + +// Destroy deletes all the Extension resources. +func (e *extension) Destroy(ctx context.Context) error { + return e.deleteExtensionResources(ctx, sets.NewString()) +} + +// Wait waits until the Extension resources are ready. +func (e *extension) Wait(ctx context.Context) error { + fns := e.forEach(func(ctx context.Context, extension extensionsv1alpha1.Extension, timeout time.Duration) error { + return common.WaitUntilExtensionCRReady( + ctx, + e.client, + e.logger, + func() runtime.Object { return &extensionsv1alpha1.Extension{} }, + extensionsv1alpha1.ExtensionResource, + extension.Namespace, + extension.Name, + e.waitInterval, + e.waitSevereThreshold, + timeout, + nil, + ) + }) + + return flow.ParallelExitOnError(fns...)(ctx) +} + +// WaitCleanup waits until the Extension resources are cleaned up. 
+func (e *extension) WaitCleanup(ctx context.Context) error { + return common.WaitUntilExtensionCRsDeleted( + ctx, + e.client, + e.logger, + &extensionsv1alpha1.ExtensionList{}, + func() extensionsv1alpha1.Object { return &extensionsv1alpha1.Extension{} }, + extensionsv1alpha1.ExtensionResource, + e.values.Namespace, + e.waitInterval, + e.waitTimeout, + nil, + ) +} + +// Restore uses the seed client and the ShootState to create the Extension resources and restore their state. +func (e *extension) Restore(ctx context.Context, shootState *gardencorev1alpha1.ShootState) error { + fns := e.forEach(func(ctx context.Context, extension extensionsv1alpha1.Extension, _ time.Duration) error { + deployer := &deployer{e.client, extension} + + return common.RestoreExtensionWithDeployFunction( + ctx, + shootState, + e.client, + extensionsv1alpha1.ExtensionResource, + e.values.Namespace, + deployer.deploy, + ) + }) + + return flow.Parallel(fns...)(ctx) +} + +// Migrate migrates the Extension resources. +func (e *extension) Migrate(ctx context.Context) error { + return common.MigrateExtensionCRs( + ctx, + e.client, + &extensionsv1alpha1.ExtensionList{}, + func() extensionsv1alpha1.Object { return &extensionsv1alpha1.Extension{} }, + e.values.Namespace, + ) +} + +// WaitMigrate waits until the Extension resources are migrated successfully. +func (e *extension) WaitMigrate(ctx context.Context) error { + return common.WaitUntilExtensionCRsMigrated( + ctx, + e.client, + &extensionsv1alpha1.ExtensionList{}, + func() extensionsv1alpha1.Object { return &extensionsv1alpha1.Extension{} }, + e.values.Namespace, + e.waitInterval, + e.waitTimeout, + ) +} + +// DeleteStaleResources deletes unused Extension resources from the shoot namespace in the seed. 
+func (e *extension) DeleteStaleResources(ctx context.Context) error { + wantedExtensionTypes := sets.NewString() + for _, extension := range e.values.Extensions { + wantedExtensionTypes.Insert(extension.Spec.Type) + } + return e.deleteExtensionResources(ctx, wantedExtensionTypes) +} + +func (e *extension) deleteExtensionResources(ctx context.Context, wantedExtensionTypes sets.String) error { + return common.DeleteExtensionCRs( + ctx, + e.client, + &extensionsv1alpha1.ExtensionList{}, + func() extensionsv1alpha1.Object { return &extensionsv1alpha1.Extension{} }, + e.values.Namespace, + func(obj extensionsv1alpha1.Object) bool { + return !wantedExtensionTypes.Has(obj.GetExtensionSpec().GetExtensionType()) + }, + ) +} + +func (e *extension) forEach(fn func(context.Context, extensionsv1alpha1.Extension, time.Duration) error) []flow.TaskFn { + fns := make([]flow.TaskFn, 0, len(e.values.Extensions)) + + for _, ext := range e.values.Extensions { + obj := ext.Extension + timeout := ext.Timeout + + fns = append(fns, func(ctx context.Context) error { + return fn(ctx, obj, timeout) + }) + } + + return fns +} + +type deployer struct { + client client.Client + obj extensionsv1alpha1.Extension +} + +func (d *deployer) deploy(ctx context.Context, operation string) (extensionsv1alpha1.Object, error) { + extension := &extensionsv1alpha1.Extension{ + ObjectMeta: metav1.ObjectMeta{ + Name: d.obj.Name, + Namespace: d.obj.Namespace, + }, + } + + _, err := controllerutil.CreateOrUpdate(ctx, d.client, extension, func() error { + metav1.SetMetaDataAnnotation(&extension.ObjectMeta, v1beta1constants.GardenerOperation, operation) + metav1.SetMetaDataAnnotation(&extension.ObjectMeta, v1beta1constants.GardenerTimestamp, TimeNow().UTC().String()) + extension.Spec.Type = d.obj.Spec.Type + extension.Spec.ProviderConfig = d.obj.Spec.ProviderConfig + return nil + }) + + return extension, err +} diff --git 
a/vendor/github.com/gardener/gardener/pkg/operation/botanist/extensions/infrastructure/infrastructure.go b/vendor/github.com/gardener/gardener/pkg/operation/botanist/extensions/infrastructure/infrastructure.go index 9ed339a68..47756eb21 100644 --- a/vendor/github.com/gardener/gardener/pkg/operation/botanist/extensions/infrastructure/infrastructure.go +++ b/vendor/github.com/gardener/gardener/pkg/operation/botanist/extensions/infrastructure/infrastructure.go @@ -27,6 +27,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + gardencorev1alpha1 "github.com/gardener/gardener/pkg/apis/core/v1alpha1" v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants" extensionsv1alpha1 "github.com/gardener/gardener/pkg/apis/extensions/v1alpha1" "github.com/gardener/gardener/pkg/operation/common" @@ -60,12 +61,11 @@ type Values struct { Region string // SSHPublicKey is the to-be-used SSH public key of the shoot. SSHPublicKey []byte - // IsInRestorePhaseOfControlPlaneMigration indicates if the Shoot is in the restoration - // phase of the ControlPlane migration. - IsInRestorePhaseOfControlPlaneMigration bool - // DeploymentRequested indicates if the Infrastructure deployment was explicitly requested, - // i.e., if the Shoot was annotated with the "infrastructure" task. - DeploymentRequested bool + // AnnotateOperation indicates if the Infrastructure resource shall be annotated with the + // respective "gardener.cloud/operation" (forcing a reconciliation or restoration). If this is false + // then the Infrastructure object will be created/updated but the extension controller will not + // act upon it. + AnnotateOperation bool } // New creates a new instance of an ExtensionInfrastructure deployer. 
@@ -73,14 +73,17 @@ func New( logger logrus.FieldLogger, client client.Client, values *Values, + waitInterval time.Duration, + waitSevereThreshold time.Duration, + waitTimeout time.Duration, ) shoot.ExtensionInfrastructure { return &infrastructure{ client: client, logger: logger, values: values, - waitInterval: DefaultInterval, - waitSevereThreshold: DefaultSevereThreshold, - waitTimeout: DefaultTimeout, + waitInterval: waitInterval, + waitSevereThreshold: waitSevereThreshold, + waitTimeout: waitTimeout, } } @@ -98,11 +101,13 @@ type infrastructure struct { // Deploy uses the seed client to create or update the Infrastructure resource. func (i *infrastructure) Deploy(ctx context.Context) error { + _, err := i.deploy(ctx, v1beta1constants.GardenerOperationReconcile) + return err +} + +func (i *infrastructure) deploy(ctx context.Context, operation string) (extensionsv1alpha1.Object, error) { var ( - operation = v1beta1constants.GardenerOperationReconcile - restorePhase = i.values.IsInRestorePhaseOfControlPlaneMigration - requestOperation = i.values.DeploymentRequested || restorePhase - infrastructure = &extensionsv1alpha1.Infrastructure{ + infra = &extensionsv1alpha1.Infrastructure{ ObjectMeta: metav1.ObjectMeta{ Name: i.values.Name, Namespace: i.values.Namespace, @@ -117,17 +122,13 @@ func (i *infrastructure) Deploy(ctx context.Context) error { } } - if restorePhase { - operation = v1beta1constants.GardenerOperationWaitForState - } - - _, err := controllerutil.CreateOrUpdate(ctx, i.client, infrastructure, func() error { - if requestOperation { - metav1.SetMetaDataAnnotation(&infrastructure.ObjectMeta, v1beta1constants.GardenerOperation, operation) - metav1.SetMetaDataAnnotation(&infrastructure.ObjectMeta, v1beta1constants.GardenerTimestamp, TimeNow().UTC().String()) + _, err := controllerutil.CreateOrUpdate(ctx, i.client, infra, func() error { + if i.values.AnnotateOperation { + metav1.SetMetaDataAnnotation(&infra.ObjectMeta, v1beta1constants.GardenerOperation, 
operation) + metav1.SetMetaDataAnnotation(&infra.ObjectMeta, v1beta1constants.GardenerTimestamp, TimeNow().UTC().String()) } - infrastructure.Spec = extensionsv1alpha1.InfrastructureSpec{ + infra.Spec = extensionsv1alpha1.InfrastructureSpec{ DefaultSpec: extensionsv1alpha1.DefaultSpec{ Type: i.values.Type, ProviderConfig: providerConfig, @@ -136,12 +137,36 @@ func (i *infrastructure) Deploy(ctx context.Context) error { SSHPublicKey: i.values.SSHPublicKey, SecretRef: corev1.SecretReference{ Name: v1beta1constants.SecretNameCloudProvider, - Namespace: infrastructure.Namespace, + Namespace: infra.Namespace, }, } return nil }) - return err + + return infra, err +} + +// Restore uses the seed client and the ShootState to create the Infrastructure resources and restore their state. +func (i *infrastructure) Restore(ctx context.Context, shootState *gardencorev1alpha1.ShootState) error { + return common.RestoreExtensionWithDeployFunction( + ctx, + shootState, + i.client, + extensionsv1alpha1.InfrastructureResource, + i.values.Namespace, + i.deploy, + ) +} + +// Migrate migrates the Infrastructure resources. +func (i *infrastructure) Migrate(ctx context.Context) error { + return common.MigrateExtensionCR( + ctx, + i.client, + func() extensionsv1alpha1.Object { return &extensionsv1alpha1.Infrastructure{} }, + i.values.Namespace, + i.values.Name, + ) } // Destroy deletes the Infrastructure resource. @@ -181,6 +206,19 @@ func (i *infrastructure) Wait(ctx context.Context) error { ) } +// WaitMigrate waits until the Infrastructure resources are migrated successfully. +func (i *infrastructure) WaitMigrate(ctx context.Context) error { + return common.WaitUntilExtensionCRMigrated( + ctx, + i.client, + func() extensionsv1alpha1.Object { return &extensionsv1alpha1.Infrastructure{} }, + i.values.Namespace, + i.values.Name, + i.waitInterval, + i.waitTimeout, + ) +} + // WaitCleanup waits until the Infrastructure resource is deleted. 
func (i *infrastructure) WaitCleanup(ctx context.Context) error { return common.WaitUntilExtensionCRDeleted( diff --git a/vendor/github.com/gardener/gardener/pkg/operation/botanist/extensions/worker/worker.go b/vendor/github.com/gardener/gardener/pkg/operation/botanist/extensions/worker/worker.go new file mode 100644 index 000000000..8092b8b9e --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/operation/botanist/extensions/worker/worker.go @@ -0,0 +1,355 @@ +// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package worker + +import ( + "context" + "fmt" + "time" + + gardencorev1alpha1 "github.com/gardener/gardener/pkg/apis/core/v1alpha1" + gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1" + v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants" + gardencorev1beta1helper "github.com/gardener/gardener/pkg/apis/core/v1beta1/helper" + extensionsv1alpha1 "github.com/gardener/gardener/pkg/apis/extensions/v1alpha1" + "github.com/gardener/gardener/pkg/operation/common" + "github.com/gardener/gardener/pkg/operation/shoot" + "github.com/gardener/gardener/pkg/utils" + + "github.com/Masterminds/semver" + "github.com/sirupsen/logrus" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +const ( + // DefaultInterval is the default interval for retry operations. + DefaultInterval = 5 * time.Second + // DefaultSevereThreshold is the default threshold until an error reported by another component is treated as + // 'severe'. + DefaultSevereThreshold = 30 * time.Second + // DefaultTimeout is the default timeout and defines how long Gardener should wait for a successful reconciliation + // of a Worker resource. + DefaultTimeout = 10 * time.Minute +) + +// TimeNow returns the current time. Exposed for testing. +var TimeNow = time.Now + +// Values contains the values used to create a Worker resources. +type Values struct { + // Namespace is the Shoot namespace in the seed. + Namespace string + // Name is the name of the Worker resource. + Name string + // Type is the type of the Worker provider. + Type string + // Region is the region of the shoot. + Region string + // Workers is the list of worker pools. 
+ Workers []gardencorev1beta1.Worker + // KubernetesVersion is the Kubernetes version of the cluster for which the worker nodes shall be created. + KubernetesVersion *semver.Version + // SSHPublicKey is the public SSH key that shall be installed on the worker nodes. + SSHPublicKey []byte + // InfrastructureProviderStatus is the provider status of the Infrastructure resource which might be relevant for + // the Worker reconciliation. + InfrastructureProviderStatus *runtime.RawExtension + // OperatingSystemConfigsMap contains the operating system configurations for the worker pools. + OperatingSystemConfigsMap map[string]shoot.OperatingSystemConfigs +} + +// New creates a new instance of a Worker deployer. +func New( + logger logrus.FieldLogger, + client client.Client, + values *Values, + waitInterval time.Duration, + waitSevereThreshold time.Duration, + waitTimeout time.Duration, +) shoot.ExtensionWorker { + return &worker{ + client: client, + logger: logger, + values: values, + waitInterval: waitInterval, + waitSevereThreshold: waitSevereThreshold, + waitTimeout: waitTimeout, + } +} + +type worker struct { + values *Values + logger logrus.FieldLogger + client client.Client + waitInterval time.Duration + waitSevereThreshold time.Duration + waitTimeout time.Duration + + machineDeployments []extensionsv1alpha1.MachineDeployment +} + +// Deploy uses the seed client to create or update the Worker resource. 
+func (w *worker) Deploy(ctx context.Context) error { + _, err := w.deploy(ctx, v1beta1constants.GardenerOperationReconcile) + return err +} + +func (w *worker) deploy(ctx context.Context, operation string) (extensionsv1alpha1.Object, error) { + var ( + worker = &extensionsv1alpha1.Worker{ + ObjectMeta: metav1.ObjectMeta{ + Name: w.values.Name, + Namespace: w.values.Namespace, + }, + } + pools []extensionsv1alpha1.WorkerPool + ) + + for _, workerPool := range w.values.Workers { + var volume *extensionsv1alpha1.Volume + if workerPool.Volume != nil { + volume = &extensionsv1alpha1.Volume{ + Name: workerPool.Volume.Name, + Type: workerPool.Volume.Type, + Size: workerPool.Volume.VolumeSize, + Encrypted: workerPool.Volume.Encrypted, + } + } + + var dataVolumes []extensionsv1alpha1.DataVolume + if len(workerPool.DataVolumes) > 0 { + for _, dataVolume := range workerPool.DataVolumes { + dataVolumes = append(dataVolumes, extensionsv1alpha1.DataVolume{ + Name: dataVolume.Name, + Type: dataVolume.Type, + Size: dataVolume.VolumeSize, + Encrypted: dataVolume.Encrypted, + }) + } + } + + // copy labels map + labels := utils.MergeStringMaps(workerPool.Labels) + if labels == nil { + labels = map[string]string{} + } + + // k8s node role labels + if versionConstraintK8sSmaller115.Check(w.values.KubernetesVersion) { + labels["kubernetes.io/role"] = "node" + labels["node-role.kubernetes.io/node"] = "" + } else { + labels["node.kubernetes.io/role"] = "node" + } + + if gardencorev1beta1helper.SystemComponentsAllowed(&workerPool) { + labels[v1beta1constants.LabelWorkerPoolSystemComponents] = "true" + } + + // worker pool name labels + labels[v1beta1constants.LabelWorkerPool] = workerPool.Name + labels[v1beta1constants.LabelWorkerPoolDeprecated] = workerPool.Name + + // add CRI labels selected by the RuntimeClass + if workerPool.CRI != nil { + labels[extensionsv1alpha1.CRINameWorkerLabel] = string(workerPool.CRI.Name) + if len(workerPool.CRI.ContainerRuntimes) > 0 { + for _, cr := range 
workerPool.CRI.ContainerRuntimes { + key := fmt.Sprintf(extensionsv1alpha1.ContainerRuntimeNameWorkerLabel, cr.Type) + labels[key] = "true" + } + } + } + + var pConfig *runtime.RawExtension + if workerPool.ProviderConfig != nil { + pConfig = &runtime.RawExtension{ + Raw: workerPool.ProviderConfig.Raw, + } + } + + var userData []byte + if val, ok := w.values.OperatingSystemConfigsMap[workerPool.Name]; ok { + userData = []byte(val.Downloader.Data.Content) + } + + pools = append(pools, extensionsv1alpha1.WorkerPool{ + Name: workerPool.Name, + Minimum: workerPool.Minimum, + Maximum: workerPool.Maximum, + MaxSurge: *workerPool.MaxSurge, + MaxUnavailable: *workerPool.MaxUnavailable, + Annotations: workerPool.Annotations, + Labels: labels, + Taints: workerPool.Taints, + MachineType: workerPool.Machine.Type, + MachineImage: extensionsv1alpha1.MachineImage{ + Name: workerPool.Machine.Image.Name, + Version: *workerPool.Machine.Image.Version, + }, + ProviderConfig: pConfig, + UserData: userData, + Volume: volume, + DataVolumes: dataVolumes, + KubeletDataVolumeName: workerPool.KubeletDataVolumeName, + Zones: workerPool.Zones, + MachineControllerManagerSettings: workerPool.MachineControllerManagerSettings, + }) + } + + _, err := controllerutil.CreateOrUpdate(ctx, w.client, worker, func() error { + metav1.SetMetaDataAnnotation(&worker.ObjectMeta, v1beta1constants.GardenerOperation, operation) + metav1.SetMetaDataAnnotation(&worker.ObjectMeta, v1beta1constants.GardenerTimestamp, TimeNow().UTC().String()) + + worker.Spec = extensionsv1alpha1.WorkerSpec{ + DefaultSpec: extensionsv1alpha1.DefaultSpec{ + Type: w.values.Type, + }, + Region: w.values.Region, + SecretRef: corev1.SecretReference{ + Name: v1beta1constants.SecretNameCloudProvider, + Namespace: worker.Namespace, + }, + SSHPublicKey: w.values.SSHPublicKey, + InfrastructureProviderStatus: w.values.InfrastructureProviderStatus, + Pools: pools, + } + + return nil + }) + + return worker, err +} + +// Restore uses the seed client 
and the ShootState to create the Worker resources and restore their state. +func (w *worker) Restore(ctx context.Context, shootState *gardencorev1alpha1.ShootState) error { + return common.RestoreExtensionWithDeployFunction( + ctx, + shootState, + w.client, + extensionsv1alpha1.WorkerResource, + w.values.Namespace, + w.deploy, + ) +} + +// Migrate migrates the Worker resource. +func (w *worker) Migrate(ctx context.Context) error { + return common.MigrateExtensionCR( + ctx, + w.client, + func() extensionsv1alpha1.Object { return &extensionsv1alpha1.Worker{} }, + w.values.Namespace, + w.values.Name, + ) +} + +// Destroy deletes the Worker resource. +func (w *worker) Destroy(ctx context.Context) error { + return common.DeleteExtensionCR( + ctx, + w.client, + func() extensionsv1alpha1.Object { return &extensionsv1alpha1.Worker{} }, + w.values.Namespace, + w.values.Name, + ) +} + +// Wait waits until the Worker resource is ready. +func (w *worker) Wait(ctx context.Context) error { + return common.WaitUntilExtensionCRReady( + ctx, + w.client, + w.logger, + func() runtime.Object { return &extensionsv1alpha1.Worker{} }, + extensionsv1alpha1.WorkerResource, + w.values.Namespace, + w.values.Name, + w.waitInterval, + w.waitSevereThreshold, + w.waitTimeout, + func(obj runtime.Object) error { + worker, ok := obj.(*extensionsv1alpha1.Worker) + if !ok { + return fmt.Errorf("expected extensionsv1alpha1.Worker but got %T", worker) + } + + w.machineDeployments = worker.Status.MachineDeployments + return nil + }, + ) +} + +// WaitMigrate waits until the Worker resources are migrated successfully. +func (w *worker) WaitMigrate(ctx context.Context) error { + return common.WaitUntilExtensionCRMigrated( + ctx, + w.client, + func() extensionsv1alpha1.Object { return &extensionsv1alpha1.Worker{} }, + w.values.Namespace, + w.values.Name, + w.waitInterval, + w.waitTimeout, + ) +} + +// WaitCleanup waits until the Worker resource is deleted. 
+func (w *worker) WaitCleanup(ctx context.Context) error { + return common.WaitUntilExtensionCRDeleted( + ctx, + w.client, + w.logger, + func() extensionsv1alpha1.Object { return &extensionsv1alpha1.Worker{} }, + extensionsv1alpha1.WorkerResource, + w.values.Namespace, + w.values.Name, + w.waitInterval, + w.waitTimeout, + ) +} + +// SetSSHPublicKey sets the public SSH key in the values. +func (w *worker) SetSSHPublicKey(key []byte) { + w.values.SSHPublicKey = key +} + +// SetInfrastructureProviderStatus sets the infrastructure provider status in the values. +func (w *worker) SetInfrastructureProviderStatus(status *runtime.RawExtension) { + w.values.InfrastructureProviderStatus = status +} + +// SetOperatingSystemConfigMaps sets the operating system config maps in the values. +func (w *worker) SetOperatingSystemConfigMaps(maps map[string]shoot.OperatingSystemConfigs) { + w.values.OperatingSystemConfigsMap = maps +} + +// MachineDeployments returns the generated machine deployments of the Worker. +func (w *worker) MachineDeployments() []extensionsv1alpha1.MachineDeployment { + return w.machineDeployments +} + +var versionConstraintK8sSmaller115 *semver.Constraints + +func init() { + var err error + + versionConstraintK8sSmaller115, err = semver.NewConstraint("< 1.15") + utilruntime.Must(err) +} diff --git a/vendor/github.com/gardener/gardener/pkg/operation/botanist/health_check.go b/vendor/github.com/gardener/gardener/pkg/operation/botanist/health_check.go index ee06427bc..120cf656b 100644 --- a/vendor/github.com/gardener/gardener/pkg/operation/botanist/health_check.go +++ b/vendor/github.com/gardener/gardener/pkg/operation/botanist/health_check.go @@ -53,13 +53,12 @@ import ( ) func mustGardenRoleLabelSelector(gardenRoles ...string) labels.Selector { - // TODO (ialidzhikov): switch to v1beta1constants.GardenRole in a future version. 
if len(gardenRoles) == 1 { - return labels.SelectorFromSet(map[string]string{v1beta1constants.DeprecatedGardenRole: gardenRoles[0]}) + return labels.SelectorFromSet(map[string]string{v1beta1constants.GardenRole: gardenRoles[0]}) } selector := labels.NewSelector() - requirement, err := labels.NewRequirement(v1beta1constants.DeprecatedGardenRole, selection.In, gardenRoles) + requirement, err := labels.NewRequirement(v1beta1constants.GardenRole, selection.In, gardenRoles) if err != nil { panic(err) } @@ -277,6 +276,8 @@ func computeRequiredControlPlaneDeployments( return nil, err } + // TODO: This check can be removed after few releases, as the cluster-autoscaler is now enabled even + // during the rolling-update. Related change: https://github.com/gardener/gardener/pull/3332 // if worker resource is processing (during maintenance), there might be a rolling update in progress // during rolling updates, the autoscaler deployment is scaled down & therefore not required rollingUpdateMightBeOngoing := false diff --git a/vendor/github.com/gardener/gardener/pkg/operation/botanist/infrastructure.go b/vendor/github.com/gardener/gardener/pkg/operation/botanist/infrastructure.go index d88cb1ddb..1e606381a 100644 --- a/vendor/github.com/gardener/gardener/pkg/operation/botanist/infrastructure.go +++ b/vendor/github.com/gardener/gardener/pkg/operation/botanist/infrastructure.go @@ -1,4 +1,4 @@ -// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -18,14 +18,12 @@ import ( "context" v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants" - extensionsv1alpha1 "github.com/gardener/gardener/pkg/apis/extensions/v1alpha1" "github.com/gardener/gardener/pkg/controllerutils" "github.com/gardener/gardener/pkg/operation/botanist/extensions/infrastructure" "github.com/gardener/gardener/pkg/operation/common" "github.com/gardener/gardener/pkg/operation/shoot" "github.com/gardener/gardener/pkg/utils/secrets" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -35,14 +33,16 @@ func (b *Botanist) DefaultInfrastructure(seedClient client.Client) shoot.Extensi b.Logger, seedClient, &infrastructure.Values{ - Namespace: b.Shoot.SeedNamespace, - Name: b.Shoot.Info.Name, - Type: b.Shoot.Info.Spec.Provider.Type, - ProviderConfig: b.Shoot.Info.Spec.Provider.InfrastructureConfig, - Region: b.Shoot.Info.Spec.Region, - IsInRestorePhaseOfControlPlaneMigration: b.isRestorePhase(), - DeploymentRequested: controllerutils.HasTask(b.Shoot.Info.Annotations, common.ShootTaskDeployInfrastructure), + Namespace: b.Shoot.SeedNamespace, + Name: b.Shoot.Info.Name, + Type: b.Shoot.Info.Spec.Provider.Type, + ProviderConfig: b.Shoot.Info.Spec.Provider.InfrastructureConfig, + Region: b.Shoot.Info.Spec.Region, + AnnotateOperation: controllerutils.HasTask(b.Shoot.Info.Annotations, common.ShootTaskDeployInfrastructure) || b.isRestorePhase(), }, + infrastructure.DefaultInterval, + infrastructure.DefaultSevereThreshold, + infrastructure.DefaultTimeout, ) } @@ -51,19 +51,11 @@ func (b *Botanist) DefaultInfrastructure(seedClient client.Client) shoot.Extensi func (b *Botanist) DeployInfrastructure(ctx context.Context) error { b.Shoot.Components.Extensions.Infrastructure.SetSSHPublicKey(b.Secrets[v1beta1constants.SecretNameSSHKeyPair].Data[secrets.DataKeySSHAuthorizedKeys]) - if err := b.Shoot.Components.Extensions.Infrastructure.Deploy(ctx); err != nil { - return err - } - if 
b.isRestorePhase() { - return b.restoreExtensionObject(ctx, &extensionsv1alpha1.Infrastructure{ - ObjectMeta: metav1.ObjectMeta{ - Name: b.Shoot.Info.Name, - Namespace: b.Shoot.SeedNamespace, - }, - }, extensionsv1alpha1.InfrastructureResource) + return b.Shoot.Components.Extensions.Infrastructure.Restore(ctx, b.ShootState) } - return nil + + return b.Shoot.Components.Extensions.Infrastructure.Deploy(ctx) } // WaitForInfrastructure waits until the infrastructure reconciliation has finished and extracts the provider status diff --git a/vendor/github.com/gardener/gardener/pkg/operation/botanist/migration.go b/vendor/github.com/gardener/gardener/pkg/operation/botanist/migration.go index ae8c2a1df..9b08e1aeb 100644 --- a/vendor/github.com/gardener/gardener/pkg/operation/botanist/migration.go +++ b/vendor/github.com/gardener/gardener/pkg/operation/botanist/migration.go @@ -32,12 +32,20 @@ import ( // AnnotateExtensionCRsForMigration annotates extension CRs with migrate operation annotation func (b *Botanist) AnnotateExtensionCRsForMigration(ctx context.Context) (err error) { - var fns []flow.TaskFn - fns, err = b.applyFuncToAllExtensionCRs(ctx, annotateObjectForMigrationFunc(ctx, b.K8sSeedClient.DirectClient())) + fns, err := b.applyFuncToAllExtensionCRs(ctx, annotateObjectForMigrationFunc(ctx, b.K8sSeedClient.DirectClient())) if err != nil { return err } - fns = append(fns, b.Shoot.Components.Extensions.Network.Migrate, b.Shoot.Components.Extensions.ContainerRuntime.Migrate) + + fns = append(fns, + b.Shoot.Components.Extensions.ContainerRuntime.Migrate, + b.Shoot.Components.Extensions.ControlPlane.Migrate, + b.Shoot.Components.Extensions.ControlPlaneExposure.Migrate, + b.Shoot.Components.Extensions.Extension.Migrate, + b.Shoot.Components.Extensions.Infrastructure.Migrate, + b.Shoot.Components.Extensions.Network.Migrate, + b.Shoot.Components.Extensions.Worker.Migrate, + ) return flow.Parallel(fns...)(ctx) } @@ -69,7 +77,16 @@ func (b *Botanist) 
WaitForExtensionsOperationMigrateToSucceed(ctx context.Contex if err != nil { return err } - fns = append(fns, b.Shoot.Components.Extensions.Network.WaitMigrate, b.Shoot.Components.Extensions.ContainerRuntime.WaitMigrate) + + fns = append(fns, + b.Shoot.Components.Extensions.ContainerRuntime.WaitMigrate, + b.Shoot.Components.Extensions.ControlPlane.WaitMigrate, + b.Shoot.Components.Extensions.ControlPlaneExposure.WaitMigrate, + b.Shoot.Components.Extensions.Extension.WaitMigrate, + b.Shoot.Components.Extensions.Infrastructure.WaitMigrate, + b.Shoot.Components.Extensions.Network.WaitMigrate, + b.Shoot.Components.Extensions.Worker.WaitMigrate, + ) return flow.Parallel(fns...)(ctx) } @@ -81,13 +98,21 @@ func (b *Botanist) DeleteAllExtensionCRs(ctx context.Context) error { if err != nil { return err } - return common.DeleteExtensionCR(ctx, b.K8sSeedClient.Client(), func() extensionsv1alpha1.Object { return extensionObj }, extensionObj.GetNamespace(), extensionObj.GetName()) }) if err != nil { return err } - fns = append(fns, b.Shoot.Components.Extensions.Network.Destroy, b.Shoot.Components.Extensions.ContainerRuntime.Destroy) + + fns = append(fns, + b.Shoot.Components.Extensions.ContainerRuntime.Destroy, + b.Shoot.Components.Extensions.ControlPlane.Destroy, + b.Shoot.Components.Extensions.ControlPlaneExposure.Destroy, + b.Shoot.Components.Extensions.Extension.Destroy, + b.Shoot.Components.Extensions.Infrastructure.Destroy, + b.Shoot.Components.Extensions.Network.Destroy, + b.Shoot.Components.Extensions.Worker.Destroy, + ) return flow.Parallel(fns...)(ctx) } @@ -159,6 +184,7 @@ func (b *Botanist) DeleteBackupEntryFromSeed(ctx context.Context) error { func (b *Botanist) isRestorePhase() bool { return b.Shoot != nil && + b.Shoot.Info != nil && b.Shoot.Info.Status.LastOperation != nil && b.Shoot.Info.Status.LastOperation.Type == gardencorev1beta1.LastOperationTypeRestore } diff --git a/vendor/github.com/gardener/gardener/pkg/operation/botanist/network.go 
b/vendor/github.com/gardener/gardener/pkg/operation/botanist/network.go index 7f425add8..49c71dceb 100644 --- a/vendor/github.com/gardener/gardener/pkg/operation/botanist/network.go +++ b/vendor/github.com/gardener/gardener/pkg/operation/botanist/network.go @@ -1,4 +1,4 @@ -// Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/github.com/gardener/gardener/pkg/operation/botanist/secrets.go b/vendor/github.com/gardener/gardener/pkg/operation/botanist/secrets.go index d57c2ceac..97756ac14 100644 --- a/vendor/github.com/gardener/gardener/pkg/operation/botanist/secrets.go +++ b/vendor/github.com/gardener/gardener/pkg/operation/botanist/secrets.go @@ -46,7 +46,7 @@ import ( func (b *Botanist) GenerateAndSaveSecrets(ctx context.Context) error { gardenerResourceDataList := gardencorev1alpha1helper.GardenerResourceDataList(b.ShootState.Spec.Gardener).DeepCopy() - if val, ok := common.GetShootOperationAnnotation(b.Shoot.Info.Annotations); ok && val == common.ShootOperationRotateKubeconfigCredentials { + if val, ok := b.Shoot.Info.Annotations[v1beta1constants.GardenerOperation]; ok && val == common.ShootOperationRotateKubeconfigCredentials { if err := b.rotateKubeconfigSecrets(ctx, &gardenerResourceDataList); err != nil { return err } @@ -221,7 +221,6 @@ func (b *Botanist) rotateKubeconfigSecrets(ctx context.Context, gardenerResource } _, err := kutil.TryUpdateShootAnnotations(ctx, b.K8sGardenClient.GardenCore(), retry.DefaultRetry, b.Shoot.Info.ObjectMeta, func(shoot *gardencorev1beta1.Shoot) 
(*gardencorev1beta1.Shoot, error) { delete(shoot.Annotations, v1beta1constants.GardenerOperation) - delete(shoot.Annotations, common.ShootOperationDeprecated) return shoot, nil }) return err @@ -282,7 +281,6 @@ const ( secretSuffixKubeConfig = "kubeconfig" secretSuffixSSHKeyPair = v1beta1constants.SecretNameSSHKeyPair secretSuffixMonitoring = "monitoring" - secretSuffixLogging = "logging" // deprecated, used only to delete unused secrets ) func computeProjectSecretName(shootName, suffix string) string { @@ -350,19 +348,6 @@ func (b *Botanist) SyncShootCredentialsToGarden(ctx context.Context) error { }) } - // Clean Kibana credentials that are not used since https://github.com/gardener/gardener/pull/2515 - // TOOD, remove in future version. - fns = append(fns, func(ctx context.Context) error { - secretObj := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: computeProjectSecretName(b.Shoot.Info.Name, secretSuffixLogging), - Namespace: b.Shoot.Info.Namespace, - }, - } - - return client.IgnoreNotFound(b.K8sGardenClient.Client().Delete(ctx, secretObj, &client.DeleteOptions{})) - }) - return flow.Parallel(fns...)(ctx) } diff --git a/vendor/github.com/gardener/gardener/pkg/operation/botanist/seedsystemcomponents/seedadmission/seedadmission.go b/vendor/github.com/gardener/gardener/pkg/operation/botanist/seedsystemcomponents/seedadmission/seedadmission.go index 961859247..b5615c917 100644 --- a/vendor/github.com/gardener/gardener/pkg/operation/botanist/seedsystemcomponents/seedadmission/seedadmission.go +++ b/vendor/github.com/gardener/gardener/pkg/operation/botanist/seedsystemcomponents/seedadmission/seedadmission.go @@ -45,13 +45,16 @@ import ( ) const ( - name = "gardener-seed-admission-controller" - managedResourceName = name - deploymentName = name - containerName = name + // Name is used as metadata.name of the ServiceAccount, ManagedResource, + // ClusterRole, ClusterRoleBinding, Service, Deployment and ValidatingWebhookConfiguration + // of the seed 
admission controller. + Name = "gardener-seed-admission-controller" + managedResourceName = Name + deploymentName = Name + containerName = Name port = 10250 - volumeName = name + "-tls" + volumeName = Name + "-tls" volumeMountPath = "/srv/gardener-seed-admission-controller" ) @@ -83,7 +86,7 @@ func (g *gardenerSeedAdmissionController) Deploy(ctx context.Context) error { serviceAccount = &corev1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ - Name: name, + Name: Name, Namespace: g.namespace, Labels: getLabels(), }, @@ -91,7 +94,7 @@ func (g *gardenerSeedAdmissionController) Deploy(ctx context.Context) error { clusterRole = &rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{ - Name: name, + Name: Name, Labels: getLabels(), }, Rules: []rbacv1.PolicyRule{ @@ -121,7 +124,7 @@ func (g *gardenerSeedAdmissionController) Deploy(ctx context.Context) error { clusterRoleBinding = &rbacv1.ClusterRoleBinding{ ObjectMeta: metav1.ObjectMeta{ - Name: name, + Name: Name, Labels: getLabels(), }, RoleRef: rbacv1.RoleRef{ @@ -138,7 +141,7 @@ func (g *gardenerSeedAdmissionController) Deploy(ctx context.Context) error { service = &corev1.Service{ ObjectMeta: metav1.ObjectMeta{ - Name: name, + Name: Name, Namespace: g.namespace, Labels: getLabels(), }, @@ -156,7 +159,7 @@ func (g *gardenerSeedAdmissionController) Deploy(ctx context.Context) error { secret = &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: name + "-tls", + Name: Name + "-tls", Namespace: g.namespace, Labels: getLabels(), }, @@ -239,7 +242,7 @@ func (g *gardenerSeedAdmissionController) Deploy(ctx context.Context) error { minAvailable = intstr.FromInt(1) podDisruptionBudget = &policyv1beta1.PodDisruptionBudget{ ObjectMeta: metav1.ObjectMeta{ - Name: name, + Name: Name, Namespace: g.namespace, Labels: getLabels(), }, @@ -254,7 +257,7 @@ func (g *gardenerSeedAdmissionController) Deploy(ctx context.Context) error { updateMode = autoscalingv1beta2.UpdateModeAuto vpa = &autoscalingv1beta2.VerticalPodAutoscaler{ ObjectMeta: 
metav1.ObjectMeta{ - Name: name + "-vpa", + Name: Name + "-vpa", Namespace: g.namespace, Labels: getLabels(), }, @@ -273,7 +276,7 @@ func (g *gardenerSeedAdmissionController) Deploy(ctx context.Context) error { failurePolicy = admissionregistrationv1beta1.Fail validatingWebhookConfiguration = &admissionregistrationv1beta1.ValidatingWebhookConfiguration{ ObjectMeta: metav1.ObjectMeta{ - Name: name, + Name: Name, Namespace: g.namespace, Labels: getLabels(), }, @@ -291,7 +294,7 @@ func (g *gardenerSeedAdmissionController) Deploy(ctx context.Context) error { FailurePolicy: &failurePolicy, NamespaceSelector: &metav1.LabelSelector{}, ClientConfig: admissionregistrationv1beta1.WebhookClientConfig{ - CABundle: []byte(tlsCACert), + CABundle: []byte(TLSCACert), Service: &admissionregistrationv1beta1.ServiceReference{ Name: service.Name, Namespace: service.Namespace, @@ -322,7 +325,7 @@ func (g *gardenerSeedAdmissionController) Deploy(ctx context.Context) error { FailurePolicy: &failurePolicy, NamespaceSelector: &metav1.LabelSelector{}, ClientConfig: admissionregistrationv1beta1.WebhookClientConfig{ - CABundle: []byte(tlsCACert), + CABundle: []byte(TLSCACert), Service: &admissionregistrationv1beta1.ServiceReference{ Name: service.Name, Namespace: service.Namespace, @@ -397,7 +400,11 @@ func init() { } const ( - tlsCACert = `-----BEGIN CERTIFICATE----- + // TLSCACert is the certificate authority of the + // seed admission controller server. + // TODO(mvladev) this cert is hard-coded. + // fix it in another PR. 
+ TLSCACert = `-----BEGIN CERTIFICATE----- MIIC+jCCAeKgAwIBAgIUTp3XvhrWOVM8ZGe86YoXMV/UJ7AwDQYJKoZIhvcNAQEL BQAwFTETMBEGA1UEAxMKa3ViZXJuZXRlczAeFw0xOTAyMjcxNTM0MDBaFw0yNDAy MjYxNTM0MDBaMBUxEzARBgNVBAMTCmt1YmVybmV0ZXMwggEiMA0GCSqGSIb3DQEB diff --git a/vendor/github.com/gardener/gardener/pkg/operation/botanist/waiter.go b/vendor/github.com/gardener/gardener/pkg/operation/botanist/waiter.go index 28715053f..839f3a745 100644 --- a/vendor/github.com/gardener/gardener/pkg/operation/botanist/waiter.go +++ b/vendor/github.com/gardener/gardener/pkg/operation/botanist/waiter.go @@ -23,15 +23,18 @@ import ( gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1" v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants" + gardencorev1beta1helper "github.com/gardener/gardener/pkg/apis/core/v1beta1/helper" "github.com/gardener/gardener/pkg/operation/common" kutil "github.com/gardener/gardener/pkg/utils/kubernetes" "github.com/gardener/gardener/pkg/utils/retry" + errorspkg "github.com/pkg/errors" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/utils/pointer" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -74,35 +77,58 @@ func (b *Botanist) WaitUntilKubeAPIServerIsDeleted(ctx context.Context) error { // WaitUntilKubeAPIServerReady waits until the kube-apiserver pod(s) indicate readiness in their statuses. 
func (b *Botanist) WaitUntilKubeAPIServerReady(ctx context.Context) error { - return retry.UntilTimeout(ctx, 5*time.Second, 300*time.Second, func(ctx context.Context) (done bool, err error) { + deployment := &appsv1.Deployment{} - deploy := &appsv1.Deployment{} - if err := b.K8sSeedClient.DirectClient().Get(ctx, kutil.Key(b.Shoot.SeedNamespace, v1beta1constants.DeploymentNameKubeAPIServer), deploy); err != nil { + if err := retry.UntilTimeout(ctx, 5*time.Second, 300*time.Second, func(ctx context.Context) (done bool, err error) { + if err := b.K8sSeedClient.DirectClient().Get(ctx, kutil.Key(b.Shoot.SeedNamespace, v1beta1constants.DeploymentNameKubeAPIServer), deployment); err != nil { return retry.SevereError(err) } - if deploy.Generation != deploy.Status.ObservedGeneration { + if deployment.Generation != deployment.Status.ObservedGeneration { return retry.MinorError(fmt.Errorf("kube-apiserver not observed at latest generation (%d/%d)", - deploy.Status.ObservedGeneration, deploy.Generation)) + deployment.Status.ObservedGeneration, deployment.Generation)) } replicas := int32(0) - if deploy.Spec.Replicas != nil { - replicas = *deploy.Spec.Replicas + if deployment.Spec.Replicas != nil { + replicas = *deployment.Spec.Replicas } - if replicas != deploy.Status.UpdatedReplicas { + if replicas != deployment.Status.UpdatedReplicas { return retry.MinorError(fmt.Errorf("kube-apiserver does not have enough updated replicas (%d/%d)", - deploy.Status.UpdatedReplicas, replicas)) + deployment.Status.UpdatedReplicas, replicas)) } - if replicas != deploy.Status.Replicas { + if replicas != deployment.Status.Replicas { return retry.MinorError(fmt.Errorf("kube-apiserver deployment has outdated replicas")) } - if replicas != deploy.Status.AvailableReplicas { + if replicas != deployment.Status.AvailableReplicas { return retry.MinorError(fmt.Errorf("kube-apiserver does not have enough available replicas (%d/%d", - deploy.Status.AvailableReplicas, replicas)) + 
deployment.Status.AvailableReplicas, replicas)) } return retry.Ok() - }) + }); err != nil { + var retryError *retry.Error + if !errors.As(err, &retryError) { + return err + } + + newestPod, err2 := kutil.NewestPodForDeployment(ctx, b.K8sSeedClient.DirectClient(), deployment) + if err2 != nil { + return errorspkg.Wrapf(err, "failure to find the newest pod for deployment to read the logs: %s", err2.Error()) + } + if newestPod == nil { + return err + } + + logs, err2 := kutil.MostRecentCompleteLogs(ctx, b.K8sSeedClient.Kubernetes().CoreV1().Pods(newestPod.Namespace), newestPod, "kube-apiserver", pointer.Int64Ptr(10)) + if err2 != nil { + return errorspkg.Wrapf(err, "failure to read the logs: %s", err2.Error()) + } + + errWithLogs := fmt.Errorf("%s, logs of newest pod:\n%s", err.Error(), logs) + return gardencorev1beta1helper.DetermineError(errWithLogs, errWithLogs.Error()) + } + + return nil } // WaitForKubeControllerManagerToBeActive waits for the kube controller manager of a Shoot cluster has acquired leader election, thus is active. 
diff --git a/vendor/github.com/gardener/gardener/pkg/operation/botanist/worker.go b/vendor/github.com/gardener/gardener/pkg/operation/botanist/worker.go index 12ba51405..4152fe6de 100644 --- a/vendor/github.com/gardener/gardener/pkg/operation/botanist/worker.go +++ b/vendor/github.com/gardener/gardener/pkg/operation/botanist/worker.go @@ -16,216 +16,45 @@ package botanist import ( "context" - "fmt" - "strconv" - "time" v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants" - v1beta1helper "github.com/gardener/gardener/pkg/apis/core/v1beta1/helper" - extensionsv1alpha1 "github.com/gardener/gardener/pkg/apis/extensions/v1alpha1" - "github.com/gardener/gardener/pkg/operation/common" + "github.com/gardener/gardener/pkg/operation/botanist/extensions/worker" + "github.com/gardener/gardener/pkg/operation/shoot" "github.com/gardener/gardener/pkg/utils/secrets" - versionutils "github.com/gardener/gardener/pkg/utils/version" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/client" ) -// WorkerDefaultTimeout is the default timeout and defines how long Gardener should wait -// for a successful reconciliation of a worker resource. -const WorkerDefaultTimeout = 10 * time.Minute - -// DeployWorker creates the `Worker` extension resource in the shoot namespace in the seed -// cluster. Gardener waits until an external controller did reconcile the resource successfully. 
-func (b *Botanist) DeployWorker(ctx context.Context) error { - var ( - operation = v1beta1constants.GardenerOperationReconcile - restorePhase = b.isRestorePhase() - worker = &extensionsv1alpha1.Worker{ - ObjectMeta: metav1.ObjectMeta{ - Name: b.Shoot.Info.Name, - Namespace: b.Shoot.SeedNamespace, - }, - } - - pools []extensionsv1alpha1.WorkerPool - ) - - k8sVersionLessThan115, err := versionutils.CompareVersions(b.Shoot.Info.Spec.Kubernetes.Version, "<", "1.15") - if err != nil { - return err - } - - for _, workerPool := range b.Shoot.Info.Spec.Provider.Workers { - var volume *extensionsv1alpha1.Volume - if workerPool.Volume != nil { - volume = &extensionsv1alpha1.Volume{ - Name: workerPool.Volume.Name, - Type: workerPool.Volume.Type, - Size: workerPool.Volume.VolumeSize, - Encrypted: workerPool.Volume.Encrypted, - } - } - - var dataVolumes []extensionsv1alpha1.DataVolume - if len(workerPool.DataVolumes) > 0 { - for _, dataVolume := range workerPool.DataVolumes { - dataVolumes = append(dataVolumes, extensionsv1alpha1.DataVolume{ - Name: dataVolume.Name, - Type: dataVolume.Type, - Size: dataVolume.VolumeSize, - Encrypted: dataVolume.Encrypted, - }) - } - } - - if workerPool.Labels == nil { - workerPool.Labels = map[string]string{} - } - - // k8s node role labels - if k8sVersionLessThan115 { - workerPool.Labels["kubernetes.io/role"] = "node" - workerPool.Labels["node-role.kubernetes.io/node"] = "" - } else { - workerPool.Labels["node.kubernetes.io/role"] = "node" - } - - if v1beta1helper.SystemComponentsAllowed(&workerPool) { - workerPool.Labels[v1beta1constants.LabelWorkerPoolSystemComponents] = strconv.FormatBool(workerPool.SystemComponents.Allow) - } - - // worker pool name labels - workerPool.Labels[v1beta1constants.LabelWorkerPool] = workerPool.Name - workerPool.Labels[v1beta1constants.LabelWorkerPoolDeprecated] = workerPool.Name - - // add CRI labels selected by the RuntimeClass - if workerPool.CRI != nil { - 
workerPool.Labels[extensionsv1alpha1.CRINameWorkerLabel] = string(workerPool.CRI.Name) - if len(workerPool.CRI.ContainerRuntimes) > 0 { - for _, cr := range workerPool.CRI.ContainerRuntimes { - key := fmt.Sprintf(extensionsv1alpha1.ContainerRuntimeNameWorkerLabel, cr.Type) - workerPool.Labels[key] = "true" - } - } - } - - var pConfig *runtime.RawExtension - if workerPool.ProviderConfig != nil { - pConfig = &runtime.RawExtension{ - Raw: workerPool.ProviderConfig.Raw, - } - } - - pools = append(pools, extensionsv1alpha1.WorkerPool{ - Name: workerPool.Name, - Minimum: workerPool.Minimum, - Maximum: workerPool.Maximum, - MaxSurge: *workerPool.MaxSurge, - MaxUnavailable: *workerPool.MaxUnavailable, - Annotations: workerPool.Annotations, - Labels: workerPool.Labels, - Taints: workerPool.Taints, - MachineType: workerPool.Machine.Type, - MachineImage: extensionsv1alpha1.MachineImage{ - Name: workerPool.Machine.Image.Name, - Version: *workerPool.Machine.Image.Version, - }, - ProviderConfig: pConfig, - UserData: []byte(b.Shoot.OperatingSystemConfigsMap[workerPool.Name].Downloader.Data.Content), - Volume: volume, - DataVolumes: dataVolumes, - KubeletDataVolumeName: workerPool.KubeletDataVolumeName, - Zones: workerPool.Zones, - MachineControllerManagerSettings: workerPool.MachineControllerManagerSettings, - }) - } - - if restorePhase { - operation = v1beta1constants.GardenerOperationWaitForState - } - - _, err = controllerutil.CreateOrUpdate(ctx, b.K8sSeedClient.Client(), worker, func() error { - metav1.SetMetaDataAnnotation(&worker.ObjectMeta, v1beta1constants.GardenerOperation, operation) - metav1.SetMetaDataAnnotation(&worker.ObjectMeta, v1beta1constants.GardenerTimestamp, time.Now().UTC().String()) - worker.Spec = extensionsv1alpha1.WorkerSpec{ - DefaultSpec: extensionsv1alpha1.DefaultSpec{ - Type: b.Shoot.Info.Spec.Provider.Type, - }, - Region: b.Shoot.Info.Spec.Region, - SecretRef: corev1.SecretReference{ - Name: v1beta1constants.SecretNameCloudProvider, - Namespace: 
worker.Namespace, - }, - SSHPublicKey: b.Secrets[v1beta1constants.SecretNameSSHKeyPair].Data[secrets.DataKeySSHAuthorizedKeys], - InfrastructureProviderStatus: &runtime.RawExtension{ - Raw: b.Shoot.InfrastructureStatus, - }, - Pools: pools, - } - return nil - }) - if err != nil { - return err - } - - if restorePhase { - return b.restoreExtensionObject(ctx, worker, extensionsv1alpha1.WorkerResource) - } - - return nil -} - -// DestroyWorker deletes the `Worker` extension resource in the shoot namespace in the seed cluster, -// and it waits for a maximum of 5m until it is deleted. -func (b *Botanist) DestroyWorker(ctx context.Context) error { - return common.DeleteExtensionCR( - ctx, - b.K8sSeedClient.Client(), - func() extensionsv1alpha1.Object { return &extensionsv1alpha1.Worker{} }, - b.Shoot.SeedNamespace, - b.Shoot.Info.Name, - ) -} - -// WaitUntilWorkerReady waits until the worker extension resource has been successfully reconciled. -func (b *Botanist) WaitUntilWorkerReady(ctx context.Context) error { - return common.WaitUntilExtensionCRReady( - ctx, - b.K8sSeedClient.DirectClient(), +// DefaultWorker creates the default deployer for the Worker custom resource. 
+func (b *Botanist) DefaultWorker(seedClient client.Client) shoot.ExtensionWorker { + return worker.New( b.Logger, - func() runtime.Object { return &extensionsv1alpha1.Worker{} }, - "Worker", - b.Shoot.SeedNamespace, - b.Shoot.Info.Name, - DefaultInterval, - DefaultSevereThreshold, - WorkerDefaultTimeout, - func(obj runtime.Object) error { - worker, ok := obj.(*extensionsv1alpha1.Worker) - if !ok { - return fmt.Errorf("expected extensionsv1alpha1.Worker but got %T", obj) - } - - b.Shoot.MachineDeployments = worker.Status.MachineDeployments - return nil + seedClient, + &worker.Values{ + Namespace: b.Shoot.SeedNamespace, + Name: b.Shoot.Info.Name, + Type: b.Shoot.Info.Spec.Provider.Type, + Region: b.Shoot.Info.Spec.Region, + Workers: b.Shoot.Info.Spec.Provider.Workers, + KubernetesVersion: b.Shoot.KubernetesVersion, }, + worker.DefaultInterval, + worker.DefaultSevereThreshold, + worker.DefaultTimeout, ) } -// WaitUntilWorkerDeleted waits until the worker extension resource has been deleted. 
-func (b *Botanist) WaitUntilWorkerDeleted(ctx context.Context) error { - return common.WaitUntilExtensionCRDeleted( - ctx, - b.K8sSeedClient.DirectClient(), - b.Logger, - func() extensionsv1alpha1.Object { return &extensionsv1alpha1.Worker{} }, - "Worker", - b.Shoot.SeedNamespace, - b.Shoot.Info.Name, - DefaultInterval, - WorkerDefaultTimeout, - ) +// DeployWorker deploys the Worker custom resource and triggers the restore operation in case +// the Shoot is in the restore phase of the control plane migration +func (b *Botanist) DeployWorker(ctx context.Context) error { + b.Shoot.Components.Extensions.Worker.SetSSHPublicKey(b.Secrets[v1beta1constants.SecretNameSSHKeyPair].Data[secrets.DataKeySSHAuthorizedKeys]) + b.Shoot.Components.Extensions.Worker.SetInfrastructureProviderStatus(&runtime.RawExtension{Raw: b.Shoot.InfrastructureStatus}) + b.Shoot.Components.Extensions.Worker.SetOperatingSystemConfigMaps(b.Shoot.OperatingSystemConfigsMap) + + if b.isRestorePhase() { + return b.Shoot.Components.Extensions.Worker.Restore(ctx, b.ShootState) + } + + return b.Shoot.Components.Extensions.Worker.Deploy(ctx) } diff --git a/vendor/github.com/gardener/gardener/pkg/operation/common/types.go b/vendor/github.com/gardener/gardener/pkg/operation/common/types.go index 047c96fb8..f8bf6949e 100644 --- a/vendor/github.com/gardener/gardener/pkg/operation/common/types.go +++ b/vendor/github.com/gardener/gardener/pkg/operation/common/types.go @@ -47,12 +47,6 @@ const ( // allow deleting the resource (if the annotation is not set any DELETE request will be denied). ConfirmationDeletion = "confirmation.gardener.cloud/deletion" - // ConfirmationDeletionDeprecated is an annotation on a Shoot resource whose value must be set to "true" in order to - // allow deleting the Shoot (if the annotation is not set any DELETE request will be denied). - // - // Deprecated: Use `ConfirmationDeletion` instead. 
- ConfirmationDeletionDeprecated = "confirmation.garden.sapcloud.io/deletion" - // ControllerManagerInternalConfigMapName is the name of the internal config map in which the Gardener controller // manager stores its configuration. ControllerManagerInternalConfigMapName = "gardener-controller-manager-internal-config" @@ -221,27 +215,17 @@ const ( // ProjectPrefix is the prefix of namespaces representing projects. ProjectPrefix = "garden-" - // ProjectName is they key of a label on namespaces whose value holds the project name. + // ProjectName is the key of a label on namespaces whose value holds the project name. ProjectName = "project.gardener.cloud/name" - // ProjectNameDeprecated is they key of a label on namespaces whose value holds the project name. - // - // Deprecated: Use `ProjectName` instead. - ProjectNameDeprecated = "project.garden.sapcloud.io/name" - // ProjectSkipStaleCheck is the key of an annotation on a project namespace that marks the associated Project to be // skipped by the stale project controller. If the project has already configured stale timestamps in its status // then they will be reset. ProjectSkipStaleCheck = "project.gardener.cloud/skip-stale-check" - // NamespaceProject is they key of an annotation on namespace whose value holds the project uid. + // NamespaceProject is the key of an annotation on namespace whose value holds the project uid. NamespaceProject = "namespace.gardener.cloud/project" - // NamespaceProjectDeprecated is they key of an annotation on namespace whose value holds the project uid. - // - // Deprecated: Use `NamespaceProject` instead. - NamespaceProjectDeprecated = "namespace.garden.sapcloud.io/project" - // NamespaceKeepAfterProjectDeletion is a constant for an annotation on a `Namespace` resource that states that it // should not be deleted if the corresponding `Project` gets deleted. Please note that all project related labels // from the namespace will be removed when the project is being deleted. 
@@ -259,18 +243,13 @@ const ( // of referenced quotas. ShootExpirationTimestamp = "shoot.gardener.cloud/expiration-timestamp" - // ShootNoCleanup is a constant for a label on a resource indicating the the Gardener cleaner should not delete this + // ShootNoCleanup is a constant for a label on a resource indicating that the Gardener cleaner should not delete this // resource when cleaning a shoot during the deletion flow. ShootNoCleanup = "shoot.gardener.cloud/no-cleanup" // ShootStatus is a constant for a label on a Shoot resource indicating that the Shoot's health. ShootStatus = "shoot.gardener.cloud/status" - // ShootOperationDeprecated is a constant for an annotation on a Shoot in a failed state indicating that an operation shall be performed. - // - // Deprecated: Use `v1beta1constants.GardenerOperation` instead. - ShootOperationDeprecated = "shoot.garden.sapcloud.io/operation" - // ShootOperationMaintain is a constant for an annotation on a Shoot indicating that the Shoot maintenance shall be executed as soon as // possible. ShootOperationMaintain = "maintain" @@ -285,7 +264,8 @@ const ( // ShootTasks is a constant for an annotation on a Shoot which states that certain tasks should be done. ShootTasks = "shoot.gardener.cloud/tasks" - // ShootTaskDeployInfrastructure is a name for a Shoot's infrastructure deployment task. + // ShootTaskDeployInfrastructure is a name for a Shoot's infrastructure deployment task. It indicates that the + // Infrastructure extension resource shall be reconciled. ShootTaskDeployInfrastructure = "deployInfrastructure" // ShootTaskRestartControlPlanePods is a name for a Shoot task which is dedicated to restart related control plane pods. 
diff --git a/vendor/github.com/gardener/gardener/pkg/operation/common/utils.go b/vendor/github.com/gardener/gardener/pkg/operation/common/utils.go index 37df41bc9..2ea25d51f 100644 --- a/vendor/github.com/gardener/gardener/pkg/operation/common/utils.go +++ b/vendor/github.com/gardener/gardener/pkg/operation/common/utils.go @@ -205,22 +205,6 @@ func ProjectNameForNamespace(namespace *corev1.Namespace) string { return namespace.Name } -// MergeOwnerReferences merges the newReferences with the list of existing references. -func MergeOwnerReferences(references []metav1.OwnerReference, newReferences ...metav1.OwnerReference) []metav1.OwnerReference { - uids := make(map[types.UID]struct{}) - for _, reference := range references { - uids[reference.UID] = struct{}{} - } - - for _, newReference := range newReferences { - if _, ok := uids[newReference.UID]; !ok { - references = append(references, newReference) - } - } - - return references -} - // GardenerDeletionGracePeriod is the default grace period for Gardener's force deletion methods. var GardenerDeletionGracePeriod = 5 * time.Minute @@ -717,27 +701,6 @@ func GetSecretFromSecretRef(ctx context.Context, c client.Client, secretRef *cor return secret, nil } -// GetConfirmationDeletionAnnotation fetches the value for ConfirmationDeletion annotation. -// If not present, it fallbacks to ConfirmationDeletionDeprecated. -func GetConfirmationDeletionAnnotation(annotations map[string]string) (string, bool) { - return getDeprecatedAnnotation(annotations, ConfirmationDeletion, ConfirmationDeletionDeprecated) -} - -// GetShootOperationAnnotation fetches the value for v1beta1constants.GardenerOperation annotation. -// If not present, it fallbacks to ShootOperationDeprecated. 
-func GetShootOperationAnnotation(annotations map[string]string) (string, bool) { - return getDeprecatedAnnotation(annotations, v1beta1constants.GardenerOperation, ShootOperationDeprecated) -} - -func getDeprecatedAnnotation(annotations map[string]string, annotationKey, deprecatedAnnotationKey string) (string, bool) { - val, ok := annotations[annotationKey] - if !ok { - val, ok = annotations[deprecatedAnnotationKey] - } - - return val, ok -} - // CheckIfDeletionIsConfirmed returns whether the deletion of an object is confirmed or not. func CheckIfDeletionIsConfirmed(obj metav1.Object) error { annotations := obj.GetAnnotations() @@ -745,8 +708,8 @@ func CheckIfDeletionIsConfirmed(obj metav1.Object) error { return annotationRequiredError() } - value, _ := GetConfirmationDeletionAnnotation(annotations) - if true, err := strconv.ParseBool(value); err != nil || !true { + value := annotations[ConfirmationDeletion] + if confirmed, err := strconv.ParseBool(value); err != nil || !confirmed { return annotationRequiredError() } return nil diff --git a/vendor/github.com/gardener/gardener/pkg/operation/garden/garden.go b/vendor/github.com/gardener/gardener/pkg/operation/garden/garden.go index a58274ba4..0b8e3b07d 100644 --- a/vendor/github.com/gardener/gardener/pkg/operation/garden/garden.go +++ b/vendor/github.com/gardener/gardener/pkg/operation/garden/garden.go @@ -291,14 +291,14 @@ func ReadGardenSecrets(k8sInformers kubeinformers.SharedInformerFactory, k8sGard // VerifyInternalDomainSecret verifies that the internal domain secret matches to the internal domain secret used for // existing Shoot clusters. It is not allowed to change the internal domain secret if there are existing Shoot clusters. 
-func VerifyInternalDomainSecret(k8sGardenClient kubernetes.Interface, numberOfShoots int, internalDomainSecret *corev1.Secret) error { +func VerifyInternalDomainSecret(ctx context.Context, k8sGardenClient kubernetes.Interface, numberOfShoots int, internalDomainSecret *corev1.Secret) error { _, currentDomain, _, _, err := common.GetDomainInfoFromAnnotations(internalDomainSecret.Annotations) if err != nil { return fmt.Errorf("error getting information out of current internal domain secret: %+v", err) } internalConfigMap := &corev1.ConfigMap{} - err = k8sGardenClient.Client().Get(context.TODO(), kutil.Key(v1beta1constants.GardenNamespace, common.ControllerManagerInternalConfigMapName), internalConfigMap) + err = k8sGardenClient.Client().Get(ctx, kutil.Key(v1beta1constants.GardenNamespace, common.ControllerManagerInternalConfigMapName), internalConfigMap) if apierrors.IsNotFound(err) || numberOfShoots == 0 { configMap := &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ @@ -307,7 +307,7 @@ func VerifyInternalDomainSecret(k8sGardenClient kubernetes.Interface, numberOfSh }, } - _, err := controllerutil.CreateOrUpdate(context.TODO(), k8sGardenClient.Client(), configMap, func() error { + _, err := controllerutil.CreateOrUpdate(ctx, k8sGardenClient.Client(), configMap, func() error { configMap.Data = map[string]string{ common.GardenRoleInternalDomain: currentDomain, } @@ -328,7 +328,7 @@ func VerifyInternalDomainSecret(k8sGardenClient kubernetes.Interface, numberOfSh } // BootstrapCluster bootstraps the Garden cluster and deploys various required manifests. -func BootstrapCluster(k8sGardenClient kubernetes.Interface, gardenNamespace string, secrets map[string]*corev1.Secret) error { +func BootstrapCluster(ctx context.Context, k8sGardenClient kubernetes.Interface, gardenNamespace string, secrets map[string]*corev1.Secret) error { // Check whether the Kubernetes version of the Garden cluster is at least 1.16 (least supported K8s version of Gardener). 
minGardenVersion := "1.16" gardenVersionOK, err := version.CompareVersions(k8sGardenClient.Version(), ">=", minGardenVersion) @@ -340,7 +340,7 @@ func BootstrapCluster(k8sGardenClient kubernetes.Interface, gardenNamespace stri } if secrets[common.GardenRoleGlobalMonitoring] == nil { var secret *corev1.Secret - if secret, err = generateMonitoringSecret(k8sGardenClient, gardenNamespace); err != nil { + if secret, err = generateMonitoringSecret(ctx, k8sGardenClient, gardenNamespace); err != nil { return err } secrets[common.GardenRoleGlobalMonitoring] = secret @@ -349,7 +349,7 @@ func BootstrapCluster(k8sGardenClient kubernetes.Interface, gardenNamespace stri return nil } -func generateMonitoringSecret(k8sGardenClient kubernetes.Interface, gardenNamespace string) (*corev1.Secret, error) { +func generateMonitoringSecret(ctx context.Context, k8sGardenClient kubernetes.Interface, gardenNamespace string) (*corev1.Secret, error) { basicAuthSecret := &secretutils.BasicAuthSecretConfig{ Name: "monitoring-ingress-credentials", Format: secretutils.BasicAuthFormatNormal, @@ -368,7 +368,7 @@ func generateMonitoringSecret(k8sGardenClient kubernetes.Interface, gardenNamesp Namespace: gardenNamespace, }, } - if _, err := controllerutil.CreateOrUpdate(context.TODO(), k8sGardenClient.Client(), secret, func() error { + if _, err := controllerutil.CreateOrUpdate(ctx, k8sGardenClient.Client(), secret, func() error { secret.Labels = map[string]string{ v1beta1constants.GardenRole: common.GardenRoleGlobalMonitoring, } diff --git a/vendor/github.com/gardener/gardener/pkg/operation/operation.go b/vendor/github.com/gardener/gardener/pkg/operation/operation.go index e38da3711..629f1213b 100644 --- a/vendor/github.com/gardener/gardener/pkg/operation/operation.go +++ b/vendor/github.com/gardener/gardener/pkg/operation/operation.go @@ -276,12 +276,12 @@ func (b *Builder) Build(ctx context.Context, clientMap clientmap.ClientMap) (*Op // cluster which contains a Kubeconfig that can be used to 
authenticate against the Seed cluster. With it, // a Kubernetes client as well as a Chart renderer for the Seed cluster will be initialized and attached to // the already existing Operation object. -func (o *Operation) InitializeSeedClients() error { +func (o *Operation) InitializeSeedClients(ctx context.Context) error { if o.K8sSeedClient != nil { return nil } - seedClient, err := o.ClientMap.GetClient(context.TODO(), keys.ForSeed(o.Seed.Info)) + seedClient, err := o.ClientMap.GetClient(ctx, keys.ForSeed(o.Seed.Info)) if err != nil { return fmt.Errorf("failed to get seed client: %w", err) } @@ -489,7 +489,7 @@ func (o *Operation) SaveGardenerResourcesInShootState(ctx context.Context, resou // DeleteClusterResourceFromSeed deletes the `Cluster` extension resource for the shoot in the seed cluster. func (o *Operation) DeleteClusterResourceFromSeed(ctx context.Context) error { - if err := o.InitializeSeedClients(); err != nil { + if err := o.InitializeSeedClients(ctx); err != nil { o.Logger.Errorf("Could not initialize a new Kubernetes client for the seed cluster: %s", err.Error()) return err } diff --git a/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/event.go b/vendor/github.com/gardener/gardener/pkg/operation/seed/scheduler/configurator/configurator.go similarity index 58% rename from vendor/github.com/gardener/gardener/pkg/utils/kubernetes/event.go rename to vendor/github.com/gardener/gardener/pkg/operation/seed/scheduler/configurator/configurator.go index 8280af600..f5414f23f 100644 --- a/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/event.go +++ b/vendor/github.com/gardener/gardener/pkg/operation/seed/scheduler/configurator/configurator.go @@ -12,24 +12,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -package kubernetes +package configurator -import corev1 "k8s.io/api/core/v1" - -// SortableEvents implements sort.Interface for []api.Event based on the Timestamp field. 
-type SortableEvents []corev1.Event - -// Len implements sort.Interface. -func (list SortableEvents) Len() int { - return len(list) +// Configurator provides config to the scheduler. +type Configurator interface { + Config() (config string, sha256Hash string, err error) } -// Swap implements sort.Interface. -func (list SortableEvents) Swap(i, j int) { - list[i], list[j] = list[j], list[i] -} +// NoOp does nothing. +func NoOp() Configurator { return &noOp{} } -// Less implements sort.Interface. -func (list SortableEvents) Less(i, j int) bool { - return list[i].LastTimestamp.Time.Before(list[j].LastTimestamp.Time) -} +type noOp struct{} + +func (n *noOp) Config() (string, string, error) { return "", "", nil } diff --git a/vendor/github.com/gardener/gardener/pkg/operation/seed/scheduler/configurator/doc.go b/vendor/github.com/gardener/gardener/pkg/operation/seed/scheduler/configurator/doc.go new file mode 100644 index 000000000..c2d496bc1 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/operation/seed/scheduler/configurator/doc.go @@ -0,0 +1,17 @@ +// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package configurator contains the Configurator interface +// It is a separate package to avoid dependency cycle. 
+package configurator diff --git a/vendor/github.com/gardener/gardener/pkg/operation/seed/scheduler/gardener_kube_scheduler.go b/vendor/github.com/gardener/gardener/pkg/operation/seed/scheduler/gardener_kube_scheduler.go new file mode 100644 index 000000000..e79783593 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/operation/seed/scheduler/gardener_kube_scheduler.go @@ -0,0 +1,421 @@ +// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package scheduler + +import ( + "context" + "fmt" + "time" + + v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants" + "github.com/gardener/gardener/pkg/client/kubernetes" + "github.com/gardener/gardener/pkg/operation/botanist/component" + "github.com/gardener/gardener/pkg/operation/common" + "github.com/gardener/gardener/pkg/operation/seed/scheduler/configurator" + "github.com/gardener/gardener/pkg/utils" + kutil "github.com/gardener/gardener/pkg/utils/kubernetes" + "github.com/gardener/gardener/pkg/utils/managedresources" + + "github.com/pkg/errors" + admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" + appsv1 "k8s.io/api/apps/v1" + autoscalingv1 "k8s.io/api/autoscaling/v1" + corev1 "k8s.io/api/core/v1" + policyv1beta1 "k8s.io/api/policy/v1beta1" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + autoscalingv1beta2 "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2" + "k8s.io/utils/pointer" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +const ( + name = "gardener-kube-scheduler" + containerName = "kube-scheduler" + portNameMetrics = "metrics" + dataKeyComponentConfig = "config.yaml" + kubeSchedulerClusterRoleBindingName = "gardener.cloud:kube-scheduler" + volumeSchedulerClusterRoleBindingName = "gardener.cloud:volume-scheduler" + roleBindingName = "gardener.cloud:kube-scheduler:extension-apiserver-authentication-reader" + roleName = "extension-apiserver-authentication-reader" + kubeSchedulerClusterRoleName = "system:kube-scheduler" + volumeSchedulerClusterRoleName = "system:volume-scheduler" + webhookName = "kube-scheduler.scheduling.gardener.cloud" + volumeMountPathConfig = "/var/lib/kube-scheduler-config" +) + +// New creates a new instance of DeployWaiter for the kube-scheduler. 
+// It requires Seed cluster with version 1.18 or 1.19. +func New( + client client.Client, + namespace string, + image string, + config configurator.Configurator, + webhookClientConfig *admissionregistrationv1beta1.WebhookClientConfig, +) ( + component.DeployWaiter, + error, +) { + if client == nil { + return nil, errors.New("client is required") + } + + if len(namespace) == 0 { + return nil, errors.New("namespace is required") + } + + if namespace == v1beta1constants.GardenNamespace { + return nil, errors.New("namespace cannot be 'garden'") + } + + if len(image) == 0 { + return nil, errors.New("image is required") + } + + if config == nil { + return nil, errors.New("config is required") + } + + if webhookClientConfig == nil { + return nil, errors.New("webhookClientConfig is required") + } + + s := &kubeScheduler{ + client: client, + namespace: namespace, + image: image, + + config: config, + webhookClientConfig: webhookClientConfig, + } + + return s, nil +} + +type kubeScheduler struct { + client client.Client + namespace string + image string + config configurator.Configurator + webhookClientConfig *admissionregistrationv1beta1.WebhookClientConfig +} + +func (k *kubeScheduler) Deploy(ctx context.Context) error { + componentConfigYAML, componentConfigChecksum, err := k.config.Config() + if err != nil { + return errors.Wrap(err, "generate component config failed") + } + + const ( + port int32 = 10259 + configVolumeName string = "config" + ) + + var ( + failPolicy = admissionregistrationv1beta1.Ignore + matchPolicy = admissionregistrationv1beta1.Exact + revocationPolicy = admissionregistrationv1beta1.NeverReinvocationPolicy + timeout int32 = 2 + sideEffects = admissionregistrationv1beta1.SideEffectClassNone + scope = admissionregistrationv1beta1.NamespacedScope + updateMode = autoscalingv1beta2.UpdateModeAuto + minAvailable = intstr.FromInt(1) + + namespace = &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{ + Name: k.namespace, + Labels: getLabels(), + }} + 
kubeSchedulerClusterRoleBinding = &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: kubeSchedulerClusterRoleBindingName, + Labels: getLabels(), + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: rbacv1.GroupName, + Kind: "ClusterRole", + Name: kubeSchedulerClusterRoleName, + }, + Subjects: []rbacv1.Subject{{ + Kind: rbacv1.ServiceAccountKind, + Name: name, + Namespace: k.namespace, + }}, + } + volumeSchedulerClusterRoleBinding = &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: volumeSchedulerClusterRoleBindingName, + Labels: getLabels(), + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: rbacv1.GroupName, + Kind: "ClusterRole", + Name: volumeSchedulerClusterRoleName, + }, + Subjects: []rbacv1.Subject{{ + Kind: rbacv1.ServiceAccountKind, + Name: name, + Namespace: k.namespace, + }}, + } + configMap = &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: k.namespace, + Labels: getLabels(), + }, + Data: map[string]string{dataKeyComponentConfig: componentConfigYAML}, + } + deployment = &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: k.namespace, + Labels: getLabels(), + }, + Spec: appsv1.DeploymentSpec{ + Replicas: pointer.Int32Ptr(2), + RevisionHistoryLimit: pointer.Int32Ptr(0), + Selector: &metav1.LabelSelector{MatchLabels: getLabels()}, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "checksum/configmap-componentconfig": componentConfigChecksum, + }, + Labels: getLabels(), + }, + Spec: corev1.PodSpec{ + Affinity: &corev1.Affinity{ + PodAntiAffinity: &corev1.PodAntiAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{{ + Weight: 100, + PodAffinityTerm: corev1.PodAffinityTerm{ + TopologyKey: corev1.LabelHostname, + LabelSelector: &metav1.LabelSelector{MatchLabels: getLabels()}, + }, + }}, + }, + }, + ServiceAccountName: name, + Containers: []corev1.Container{ + { + Name: containerName, + 
Image: k.image, + ImagePullPolicy: corev1.PullIfNotPresent, + Command: k.command(port), + LivenessProbe: &corev1.Probe{ + Handler: corev1.Handler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/healthz", + Scheme: corev1.URISchemeHTTPS, + Port: intstr.FromInt(int(port)), + }, + }, + SuccessThreshold: 1, + FailureThreshold: 2, + InitialDelaySeconds: 15, + PeriodSeconds: 10, + TimeoutSeconds: 15, + }, + Ports: []corev1.ContainerPort{ + { + Name: portNameMetrics, + ContainerPort: port, + Protocol: corev1.ProtocolTCP, + }, + }, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("23m"), + corev1.ResourceMemory: resource.MustParse("64Mi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("400m"), + corev1.ResourceMemory: resource.MustParse("512Mi"), + }, + }, + VolumeMounts: []corev1.VolumeMount{ + { + Name: configVolumeName, + MountPath: volumeMountPathConfig, + }, + }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: configVolumeName, + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: configMap.Name, + }, + }, + }, + }, + }, + }, + }, + }, + } + serviceAccount = &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: k.namespace, + Labels: getLabels(), + }} + roleBinding = &rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: roleBindingName, + Namespace: metav1.NamespaceSystem, + Labels: getLabels(), + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: rbacv1.GroupName, + Kind: "Role", + Name: roleName, + }, + Subjects: []rbacv1.Subject{{ + Kind: rbacv1.ServiceAccountKind, + Name: name, + Namespace: k.namespace, + }}, + } + webhook = &admissionregistrationv1beta1.MutatingWebhookConfiguration{ + ObjectMeta: metav1.ObjectMeta{ + Name: webhookName, + Labels: getLabels(), + }, + Webhooks: []admissionregistrationv1beta1.MutatingWebhook{{ + Name: webhookName, + 
ClientConfig: *k.webhookClientConfig, + Rules: []admissionregistrationv1beta1.RuleWithOperations{{ + Operations: []admissionregistrationv1beta1.OperationType{admissionregistrationv1beta1.Create}, + Rule: admissionregistrationv1beta1.Rule{ + APIGroups: []string{corev1.GroupName}, + APIVersions: []string{corev1.SchemeGroupVersion.Version}, + Scope: &scope, + Resources: []string{"pods"}, + }, + }}, + FailurePolicy: &failPolicy, + MatchPolicy: &matchPolicy, + NamespaceSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + v1beta1constants.GardenRole: v1beta1constants.GardenRoleShoot, + }, + }, + ObjectSelector: &metav1.LabelSelector{}, + SideEffects: &sideEffects, + TimeoutSeconds: &timeout, + AdmissionReviewVersions: []string{admissionregistrationv1beta1.SchemeGroupVersion.Version}, + ReinvocationPolicy: &revocationPolicy, + }}, + } + vpa = &autoscalingv1beta2.VerticalPodAutoscaler{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: k.namespace, + Labels: getLabels(), + }, + Spec: autoscalingv1beta2.VerticalPodAutoscalerSpec{ + TargetRef: &autoscalingv1.CrossVersionObjectReference{ + APIVersion: appsv1.SchemeGroupVersion.String(), + Kind: "Deployment", + Name: deployment.Name, + }, + UpdatePolicy: &autoscalingv1beta2.PodUpdatePolicy{ + UpdateMode: &updateMode, + }, + }, + } + podDisruptionBudget = &policyv1beta1.PodDisruptionBudget{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: k.namespace, + Labels: getLabels(), + }, + Spec: policyv1beta1.PodDisruptionBudgetSpec{ + MinAvailable: &minAvailable, + Selector: &metav1.LabelSelector{ + MatchLabels: getLabels(), + }, + }, + } + + registry = managedresources.NewRegistry(kubernetes.SeedScheme, kubernetes.SeedCodec, kubernetes.SeedSerializer) + ) + + if _, err := controllerutil.CreateOrUpdate(ctx, k.client, namespace, func() error { + namespace.Labels = utils.MergeStringMaps(namespace.Labels, getLabels()) + return nil + }); err != nil { + return errors.Wrap(err, "update of Namespace 
failed") + } + + resources, err := registry.AddAllAndSerialize( + kubeSchedulerClusterRoleBinding, + volumeSchedulerClusterRoleBinding, + roleBinding, + serviceAccount, + configMap, + deployment, + webhook, + vpa, + podDisruptionBudget, + ) + if err != nil { + return err + } + + return common.DeployManagedResourceForSeed(ctx, k.client, "gardener-kube-scheduler", k.namespace, false, resources) +} + +func getLabels() map[string]string { + return map[string]string{ + "app": "kubernetes", + "role": "scheduler", + } +} + +func (k *kubeScheduler) Destroy(ctx context.Context) error { + return client.IgnoreNotFound(k.client.Delete(ctx, &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: k.namespace}})) +} + +func (k *kubeScheduler) Wait(ctx context.Context) error { + timeoutCtx, cancel := context.WithTimeout(ctx, time.Minute*2) + defer cancel() + + return managedresources.WaitUntilManagedResourceHealthy(timeoutCtx, k.client, k.namespace, "gardener-kube-scheduler") +} + +func (k *kubeScheduler) WaitCleanup(ctx context.Context) error { + return kutil.WaitUntilResourceDeleted( + ctx, + k.client, + &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: k.namespace}}, + time.Second*2, + ) +} + +func (k *kubeScheduler) command(port int32) []string { + return []string{ + "/usr/local/bin/kube-scheduler", + fmt.Sprintf("--config=%s/%s", volumeMountPathConfig, dataKeyComponentConfig), + fmt.Sprintf("--secure-port=%d", port), + "--port=0", + "--v=2", + } +} diff --git a/vendor/github.com/gardener/gardener/pkg/operation/seed/scheduler/gardener_kube_scheduler_bootstrap.go b/vendor/github.com/gardener/gardener/pkg/operation/seed/scheduler/gardener_kube_scheduler_bootstrap.go new file mode 100644 index 000000000..1f0ab5c3f --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/operation/seed/scheduler/gardener_kube_scheduler_bootstrap.go @@ -0,0 +1,159 @@ +// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. 
This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package scheduler + +import ( + "github.com/gardener/gardener/pkg/features" + gardenletfeatures "github.com/gardener/gardener/pkg/gardenlet/features" + "github.com/gardener/gardener/pkg/operation/botanist/component" + "github.com/gardener/gardener/pkg/operation/botanist/seedsystemcomponents/seedadmission" + "github.com/gardener/gardener/pkg/operation/seed/scheduler/configurator" + schedulerconfigv18 "github.com/gardener/gardener/pkg/operation/seed/scheduler/v18" + schedulerconfigv19 "github.com/gardener/gardener/pkg/operation/seed/scheduler/v19" + schedulerconfigv20 "github.com/gardener/gardener/pkg/operation/seed/scheduler/v20" + seedadmissionpkg "github.com/gardener/gardener/pkg/seedadmission" + "github.com/gardener/gardener/pkg/utils/imagevector" + schedulerconfigv18v1alpha2 "github.com/gardener/gardener/third_party/kube-scheduler/v18/v1alpha2" + schedulerconfigv19v1beta1 "github.com/gardener/gardener/third_party/kube-scheduler/v19/v1beta1" + schedulerconfigv20v1beta1 "github.com/gardener/gardener/third_party/kube-scheduler/v20/v1beta1" + + "github.com/Masterminds/semver" + "github.com/pkg/errors" + admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" + "k8s.io/utils/pointer" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// Bootstrap is used to bootstrap 
gardener-kube-scheduler in Seed clusters. +func Bootstrap( + c client.Client, + seedAdmissionControllerNamespace string, + image *imagevector.Image, + seedVersion *semver.Version, +) ( + component.DeployWaiter, + error, +) { + const ( + namespace = "gardener-kube-scheduler" + resourceName = namespace + ) + + if c == nil { + return nil, errors.New("client is required") + } + + if image == nil { + return nil, errors.New("image is required") + } + + if len(seedAdmissionControllerNamespace) == 0 { + return nil, errors.New("seedAdmissionControllerNamespace is required") + } + + if seedVersion == nil { + return nil, errors.New("seedVersion is required") + } + + var ( + config = configurator.NoOp() + err error + supportedVersion = true + ) + + switch { + case versionConstraintEqual118.Check(seedVersion): + config, err = schedulerconfigv18.NewConfigurator(resourceName, namespace, &schedulerconfigv18v1alpha2.KubeSchedulerConfiguration{ + Profiles: []schedulerconfigv18v1alpha2.KubeSchedulerProfile{{ + SchedulerName: pointer.StringPtr(seedadmissionpkg.GardenerShootControlPlaneSchedulerName), + Plugins: &schedulerconfigv18v1alpha2.Plugins{ + Score: &schedulerconfigv18v1alpha2.PluginSet{ + Disabled: []schedulerconfigv18v1alpha2.Plugin{ + {Name: "NodeResourcesLeastAllocated"}, + {Name: "NodeResourcesBalancedAllocation"}, + }, + Enabled: []schedulerconfigv18v1alpha2.Plugin{ + {Name: "NodeResourcesMostAllocated"}, + }, + }, + }, + }}, + }) + case versionConstraintEqual119.Check(seedVersion): + config, err = schedulerconfigv19.NewConfigurator(resourceName, namespace, &schedulerconfigv19v1beta1.KubeSchedulerConfiguration{ + Profiles: []schedulerconfigv19v1beta1.KubeSchedulerProfile{{ + SchedulerName: pointer.StringPtr(seedadmissionpkg.GardenerShootControlPlaneSchedulerName), + Plugins: &schedulerconfigv19v1beta1.Plugins{ + Score: &schedulerconfigv19v1beta1.PluginSet{ + Disabled: []schedulerconfigv19v1beta1.Plugin{ + {Name: "NodeResourcesLeastAllocated"}, + {Name: 
"NodeResourcesBalancedAllocation"}, + }, + Enabled: []schedulerconfigv19v1beta1.Plugin{ + {Name: "NodeResourcesMostAllocated"}, + }, + }, + }, + }}, + }) + case versionConstraintEqual120.Check(seedVersion): + config, err = schedulerconfigv20.NewConfigurator(resourceName, namespace, &schedulerconfigv20v1beta1.KubeSchedulerConfiguration{ + Profiles: []schedulerconfigv20v1beta1.KubeSchedulerProfile{{ + SchedulerName: pointer.StringPtr(seedadmissionpkg.GardenerShootControlPlaneSchedulerName), + Plugins: &schedulerconfigv20v1beta1.Plugins{ + Score: &schedulerconfigv20v1beta1.PluginSet{ + Disabled: []schedulerconfigv20v1beta1.Plugin{ + {Name: "NodeResourcesLeastAllocated"}, + {Name: "NodeResourcesBalancedAllocation"}, + }, + Enabled: []schedulerconfigv20v1beta1.Plugin{ + {Name: "NodeResourcesMostAllocated"}, + }, + }, + }, + }}, + }) + default: + supportedVersion = false + } + + if err != nil { + return nil, err + } + + scheduler, err := New( + c, + namespace, + image.String(), + config, + &admissionregistrationv1beta1.WebhookClientConfig{ + Service: &admissionregistrationv1beta1.ServiceReference{ + Name: seedadmission.Name, + Namespace: seedAdmissionControllerNamespace, + Path: pointer.StringPtr(seedadmissionpkg.GardenerShootControlPlaneSchedulerWebhookPath), + }, + CABundle: []byte(seedadmission.TLSCACert), + }, + ) + if err != nil { + return nil, err + } + + if supportedVersion && gardenletfeatures.FeatureGate.Enabled(features.SeedKubeScheduler) { + return scheduler, nil + } + + return component.OpDestroy(scheduler), nil +} diff --git a/vendor/github.com/gardener/gardener/pkg/operation/seed/scheduler/types.go b/vendor/github.com/gardener/gardener/pkg/operation/seed/scheduler/types.go new file mode 100644 index 000000000..5e8c80cfb --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/operation/seed/scheduler/types.go @@ -0,0 +1,39 @@ +// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. 
This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package scheduler + +import ( + "github.com/Masterminds/semver" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var ( + versionConstraintEqual118 *semver.Constraints + versionConstraintEqual119 *semver.Constraints + versionConstraintEqual120 *semver.Constraints +) + +func init() { + var err error + + versionConstraintEqual118, err = semver.NewConstraint("1.18.x") + utilruntime.Must(err) + + versionConstraintEqual119, err = semver.NewConstraint("1.19.x") + utilruntime.Must(err) + + versionConstraintEqual120, err = semver.NewConstraint("1.20.x") + utilruntime.Must(err) +} diff --git a/vendor/github.com/gardener/gardener/pkg/operation/seed/scheduler/v18/configurator.go b/vendor/github.com/gardener/gardener/pkg/operation/seed/scheduler/v18/configurator.go new file mode 100644 index 000000000..8b03ab93c --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/operation/seed/scheduler/v18/configurator.go @@ -0,0 +1,79 @@ +// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v18 + +import ( + "bytes" + "fmt" + "time" + + "github.com/gardener/gardener/pkg/operation/seed/scheduler/configurator" + "github.com/gardener/gardener/pkg/utils" + schedulerv18v1alpha2 "github.com/gardener/gardener/third_party/kube-scheduler/v18/v1alpha2" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + componentbaseconfigv1alpha1 "k8s.io/component-base/config/v1alpha1" + "k8s.io/utils/pointer" +) + +type v18Configurator struct { + config *schedulerv18v1alpha2.KubeSchedulerConfiguration + codec serializer.CodecFactory +} + +// NewConfigurator creates a Configurator for Kubernetes version 1.18. 
+func NewConfigurator(resourceName, namespace string, config *schedulerv18v1alpha2.KubeSchedulerConfiguration) (configurator.Configurator, error) { + scheme := runtime.NewScheme() + + if err := schedulerv18v1alpha2.AddToScheme(scheme); err != nil { + return nil, err + } + + config.LeaderElection.LeaderElectionConfiguration = componentbaseconfigv1alpha1.LeaderElectionConfiguration{ + LeaseDuration: metav1.Duration{Duration: 15 * time.Second}, + RenewDeadline: metav1.Duration{Duration: 10 * time.Second}, + RetryPeriod: metav1.Duration{Duration: 2 * time.Second}, + ResourceLock: "leases", + ResourceName: resourceName, + LeaderElect: pointer.BoolPtr(true), + ResourceNamespace: namespace, + } + + return &v18Configurator{ + config: config, + codec: serializer.NewCodecFactory(scheme, serializer.EnableStrict), + }, nil +} + +func (c *v18Configurator) Config() (string, string, error) { + const mediaType = runtime.ContentTypeYAML + + componentConfigYAML := &bytes.Buffer{} + + info, ok := runtime.SerializerInfoForMediaType(c.codec.SupportedMediaTypes(), mediaType) + if !ok { + return "", "", fmt.Errorf("unable to locate encoder -- %q is not a supported media type", mediaType) + } + + encoder := c.codec.EncoderForVersion(info.Serializer, schedulerv18v1alpha2.SchemeGroupVersion) + + if err := encoder.Encode(c.config, componentConfigYAML); err != nil { + return "", "", err + } + + return componentConfigYAML.String(), utils.ComputeSHA256Hex(componentConfigYAML.Bytes()), nil +} diff --git a/vendor/github.com/gardener/gardener/pkg/operation/seed/scheduler/v18/doc.go b/vendor/github.com/gardener/gardener/pkg/operation/seed/scheduler/v18/doc.go new file mode 100644 index 000000000..71aa2b7e5 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/operation/seed/scheduler/v18/doc.go @@ -0,0 +1,18 @@ +// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 
2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package v18 contains a kube-scheduler specific configuration for +// the 1.18 Kubernetes version. The API types used by the scheduler +// are in the v1alpha2 subpackage. +package v18 diff --git a/vendor/github.com/gardener/gardener/pkg/operation/seed/scheduler/v19/configurator.go b/vendor/github.com/gardener/gardener/pkg/operation/seed/scheduler/v19/configurator.go new file mode 100644 index 000000000..92644b087 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/operation/seed/scheduler/v19/configurator.go @@ -0,0 +1,79 @@ +// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v19 + +import ( + "bytes" + "fmt" + "time" + + "github.com/gardener/gardener/pkg/operation/seed/scheduler/configurator" + "github.com/gardener/gardener/pkg/utils" + schedulerv19v1beta1 "github.com/gardener/gardener/third_party/kube-scheduler/v19/v1beta1" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + componentbaseconfigv1alpha1 "k8s.io/component-base/config/v1alpha1" + "k8s.io/utils/pointer" +) + +type v19Configurator struct { + config *schedulerv19v1beta1.KubeSchedulerConfiguration + codec serializer.CodecFactory +} + +// NewConfigurator creates a Configurator for Kubernetes version 1.19. +func NewConfigurator(resourceName, namespace string, config *schedulerv19v1beta1.KubeSchedulerConfiguration) (configurator.Configurator, error) { + scheme := runtime.NewScheme() + + if err := schedulerv19v1beta1.AddToScheme(scheme); err != nil { + return nil, err + } + + config.LeaderElection = componentbaseconfigv1alpha1.LeaderElectionConfiguration{ + LeaseDuration: metav1.Duration{Duration: 15 * time.Second}, + RenewDeadline: metav1.Duration{Duration: 10 * time.Second}, + RetryPeriod: metav1.Duration{Duration: 2 * time.Second}, + ResourceLock: "leases", + ResourceName: resourceName, + LeaderElect: pointer.BoolPtr(true), + ResourceNamespace: namespace, + } + + return &v19Configurator{ + config: config, + codec: serializer.NewCodecFactory(scheme, serializer.EnableStrict), + }, nil +} + +func (c *v19Configurator) Config() (string, string, error) { + const mediaType = runtime.ContentTypeYAML + + componentConfigYAML := &bytes.Buffer{} + + info, ok := runtime.SerializerInfoForMediaType(c.codec.SupportedMediaTypes(), mediaType) + if !ok { + return "", "", fmt.Errorf("unable to locate encoder -- %q is not a supported media type", mediaType) + } + + encoder := c.codec.EncoderForVersion(info.Serializer, schedulerv19v1beta1.SchemeGroupVersion) + + if err := encoder.Encode(c.config, 
componentConfigYAML); err != nil { + return "", "", err + } + + return componentConfigYAML.String(), utils.ComputeSHA256Hex(componentConfigYAML.Bytes()), nil +} diff --git a/vendor/github.com/gardener/gardener/pkg/operation/seed/scheduler/v19/doc.go b/vendor/github.com/gardener/gardener/pkg/operation/seed/scheduler/v19/doc.go new file mode 100644 index 000000000..b849ef478 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/operation/seed/scheduler/v19/doc.go @@ -0,0 +1,18 @@ +// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package v19 a kube-scheduler specific configuration for +// 1.19 Kubernetes version. The API types used by the scheduler +// are in the v1beta1 subpackage. +package v19 diff --git a/vendor/github.com/gardener/gardener/pkg/operation/seed/scheduler/v20/configurator.go b/vendor/github.com/gardener/gardener/pkg/operation/seed/scheduler/v20/configurator.go new file mode 100644 index 000000000..0ba96bc7c --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/operation/seed/scheduler/v20/configurator.go @@ -0,0 +1,79 @@ +// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 
2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v20 + +import ( + "bytes" + "fmt" + "time" + + "github.com/gardener/gardener/pkg/operation/seed/scheduler/configurator" + "github.com/gardener/gardener/pkg/utils" + schedulerv20v1beta1 "github.com/gardener/gardener/third_party/kube-scheduler/v20/v1beta1" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + componentbaseconfigv1alpha1 "k8s.io/component-base/config/v1alpha1" + "k8s.io/utils/pointer" +) + +type v20Configurator struct { + config *schedulerv20v1beta1.KubeSchedulerConfiguration + codec serializer.CodecFactory +} + +// NewConfigurator creates a Configurator for Kubernetes version 1.20. 
+func NewConfigurator(resourceName, namespace string, config *schedulerv20v1beta1.KubeSchedulerConfiguration) (configurator.Configurator, error) { + scheme := runtime.NewScheme() + + if err := schedulerv20v1beta1.AddToScheme(scheme); err != nil { + return nil, err + } + + config.LeaderElection = componentbaseconfigv1alpha1.LeaderElectionConfiguration{ + LeaseDuration: metav1.Duration{Duration: 15 * time.Second}, + RenewDeadline: metav1.Duration{Duration: 10 * time.Second}, + RetryPeriod: metav1.Duration{Duration: 2 * time.Second}, + ResourceLock: "leases", + ResourceName: resourceName, + LeaderElect: pointer.BoolPtr(true), + ResourceNamespace: namespace, + } + + return &v20Configurator{ + config: config, + codec: serializer.NewCodecFactory(scheme, serializer.EnableStrict), + }, nil +} + +func (c *v20Configurator) Config() (string, string, error) { + const mediaType = runtime.ContentTypeYAML + + componentConfigYAML := &bytes.Buffer{} + + info, ok := runtime.SerializerInfoForMediaType(c.codec.SupportedMediaTypes(), mediaType) + if !ok { + return "", "", fmt.Errorf("unable to locate encoder -- %q is not a supported media type", mediaType) + } + + encoder := c.codec.EncoderForVersion(info.Serializer, schedulerv20v1beta1.SchemeGroupVersion) + + if err := encoder.Encode(c.config, componentConfigYAML); err != nil { + return "", "", err + } + + return componentConfigYAML.String(), utils.ComputeSHA256Hex(componentConfigYAML.Bytes()), nil +} diff --git a/vendor/github.com/gardener/gardener/pkg/operation/seed/scheduler/v20/doc.go b/vendor/github.com/gardener/gardener/pkg/operation/seed/scheduler/v20/doc.go new file mode 100644 index 000000000..07d39d4d3 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/operation/seed/scheduler/v20/doc.go @@ -0,0 +1,18 @@ +// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 
2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package v20 a kube-scheduler specific configuration for +// 1.20 Kubernetes version. The API types used by the scheduler +// are in the v1beta1 subpackage. +package v20 diff --git a/vendor/github.com/gardener/gardener/pkg/operation/seed/seed.go b/vendor/github.com/gardener/gardener/pkg/operation/seed/seed.go index 57f35c200..cda14bff1 100644 --- a/vendor/github.com/gardener/gardener/pkg/operation/seed/seed.go +++ b/vendor/github.com/gardener/gardener/pkg/operation/seed/seed.go @@ -43,6 +43,7 @@ import ( "github.com/gardener/gardener/pkg/operation/botanist/systemcomponents/metricsserver" "github.com/gardener/gardener/pkg/operation/common" "github.com/gardener/gardener/pkg/operation/seed/istio" + "github.com/gardener/gardener/pkg/operation/seed/scheduler" "github.com/gardener/gardener/pkg/utils" "github.com/gardener/gardener/pkg/utils/chart" "github.com/gardener/gardener/pkg/utils/flow" @@ -191,8 +192,8 @@ func generateWantedSecrets(seed *Seed, certificateAuthorities map[string]*secret // deployCertificates deploys CA and TLS certificates inside the garden namespace // It takes a map[string]*corev1.Secret object which contains secrets that have already been deployed inside that namespace to avoid duplication errors. 
-func deployCertificates(seed *Seed, k8sSeedClient kubernetes.Interface, existingSecretsMap map[string]*corev1.Secret) (map[string]*corev1.Secret, error) { - _, certificateAuthorities, err := secretsutils.GenerateCertificateAuthorities(k8sSeedClient, existingSecretsMap, wantedCertificateAuthorities, v1beta1constants.GardenNamespace) +func deployCertificates(ctx context.Context, seed *Seed, k8sSeedClient kubernetes.Interface, existingSecretsMap map[string]*corev1.Secret) (map[string]*corev1.Secret, error) { + _, certificateAuthorities, err := secretsutils.GenerateCertificateAuthorities(ctx, k8sSeedClient, existingSecretsMap, wantedCertificateAuthorities, v1beta1constants.GardenNamespace) if err != nil { return nil, err } @@ -211,14 +212,14 @@ func deployCertificates(seed *Seed, k8sSeedClient kubernetes.Interface, existing for name, secret := range existingSecretsMap { _, ok := secret.Labels[renewedLabel] if browserCerts.Has(name) && !ok { - if err := k8sSeedClient.Client().Delete(context.TODO(), secret); client.IgnoreNotFound(err) != nil { + if err := k8sSeedClient.Client().Delete(ctx, secret); client.IgnoreNotFound(err) != nil { return nil, err } delete(existingSecretsMap, name) } } - secrets, err := secretsutils.GenerateClusterSecrets(context.TODO(), k8sSeedClient, existingSecretsMap, wantedSecretsList, v1beta1constants.GardenNamespace) + secrets, err := secretsutils.GenerateClusterSecrets(ctx, k8sSeedClient, existingSecretsMap, wantedSecretsList, v1beta1constants.GardenNamespace) if err != nil { return nil, err } @@ -233,7 +234,7 @@ func deployCertificates(seed *Seed, k8sSeedClient kubernetes.Interface, existing } secret.Labels[renewedLabel] = "true" - if err := k8sSeedClient.Client().Update(context.TODO(), secret); err != nil { + if err := k8sSeedClient.Client().Update(ctx, secret); err != nil { return nil, err } } @@ -488,7 +489,7 @@ func BootstrapCluster(ctx context.Context, k8sGardenClient, k8sSeedClient kubern existingSecretsMap[secret.ObjectMeta.Name] = 
&secretObj } - deployedSecretsMap, err := deployCertificates(seed, k8sSeedClient, existingSecretsMap) + deployedSecretsMap, err := deployCertificates(ctx, seed, k8sSeedClient, existingSecretsMap) if err != nil { return err } @@ -887,6 +888,22 @@ func bootstrapComponents(c kubernetes.Interface, namespace string, imageVector i } components = append(components, seedadmission.New(c.Client(), namespace, gsacImage.String(), kubernetesVersion)) + // gardener-seed-scheduler + schedulerImage := &imagevector.Image{} + if imageVector != nil { + schedulerImage, err = imageVector.FindImage(common.KubeSchedulerImageName, imagevector.TargetVersion(kubernetesVersion.String())) + if err != nil { + return nil, err + } + } + + sched, err := scheduler.Bootstrap(c.DirectClient(), namespace, schedulerImage, kubernetesVersion) + if err != nil { + return nil, err + } + + components = append(components, sched) + return components, nil } @@ -1199,8 +1216,8 @@ func deleteIngressController(ctx context.Context, c client.Client) error { return kutil.DeleteObjects( ctx, c, - &rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Name: "nginx-ingress"}}, - &rbacv1.ClusterRoleBinding{ObjectMeta: metav1.ObjectMeta{Name: "nginx-ingress"}}, + &rbacv1.ClusterRole{ObjectMeta: metav1.ObjectMeta{Name: "gardener.cloud:seed:nginx-ingress"}}, + &rbacv1.ClusterRoleBinding{ObjectMeta: metav1.ObjectMeta{Name: "gardener.cloud:seed:nginx-ingress"}}, &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "nginx-ingress", Namespace: v1beta1constants.GardenNamespace}}, &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "nginx-ingress-controller", Namespace: v1beta1constants.GardenNamespace}}, &corev1.Service{ObjectMeta: metav1.ObjectMeta{Name: "nginx-ingress-controller", Namespace: v1beta1constants.GardenNamespace}}, diff --git a/vendor/github.com/gardener/gardener/pkg/operation/shoot/shoot.go b/vendor/github.com/gardener/gardener/pkg/operation/shoot/shoot.go index 0443d94d5..5e42075a4 100644 --- 
a/vendor/github.com/gardener/gardener/pkg/operation/shoot/shoot.go +++ b/vendor/github.com/gardener/gardener/pkg/operation/shoot/shoot.go @@ -20,7 +20,6 @@ import ( "net" "strconv" "strings" - "time" "github.com/gardener/gardener/pkg/apis/core" gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1" @@ -30,6 +29,7 @@ import ( gardencorelisters "github.com/gardener/gardener/pkg/client/core/listers/core/v1beta1" "github.com/gardener/gardener/pkg/features" gardenletfeatures "github.com/gardener/gardener/pkg/gardenlet/features" + "github.com/gardener/gardener/pkg/operation/botanist/extensions/extension" "github.com/gardener/gardener/pkg/operation/common" "github.com/gardener/gardener/pkg/operation/garden" "github.com/gardener/gardener/pkg/utils" @@ -170,7 +170,7 @@ func (b *Builder) Build(ctx context.Context, c client.Client) (*Shoot, error) { SystemComponents: &SystemComponents{}, } - extensions, err := calculateExtensions(c, shootObject, shoot.SeedNamespace) + extensions, err := calculateExtensions(ctx, c, shootObject, shoot.SeedNamespace) if err != nil { return nil, fmt.Errorf("cannot calculate required extensions for shoot %s: %v", shootObject.Name, err) } @@ -226,9 +226,9 @@ func (b *Builder) Build(ctx context.Context, c client.Client) (*Shoot, error) { return shoot, nil } -func calculateExtensions(gardenClient client.Client, shoot *gardencorev1beta1.Shoot, seedNamespace string) (map[string]Extension, error) { - var controllerRegistrations = &gardencorev1beta1.ControllerRegistrationList{} - if err := gardenClient.List(context.TODO(), controllerRegistrations); err != nil { +func calculateExtensions(ctx context.Context, gardenClient client.Client, shoot *gardencorev1beta1.Shoot, seedNamespace string) (map[string]extension.Extension, error) { + controllerRegistrations := &gardencorev1beta1.ControllerRegistrationList{} + if err := gardenClient.List(ctx, controllerRegistrations); err != nil { return nil, err } return 
MergeExtensions(controllerRegistrations.Items, shoot.Spec.Extensions, seedNamespace) @@ -424,17 +424,13 @@ func ConstructExternalDomain(ctx context.Context, client client.Client, shoot *g return externalDomain, nil } -// ExtensionDefaultTimeout is the default timeout and defines how long Gardener should wait -// for a successful reconciliation of this extension resource. -const ExtensionDefaultTimeout = 3 * time.Minute - // MergeExtensions merges the given controller registrations with the given extensions, expecting that each type in // extensions is also represented in the registration. It ignores all extensions that were explicitly disabled in the // shoot spec. -func MergeExtensions(registrations []gardencorev1beta1.ControllerRegistration, extensions []gardencorev1beta1.Extension, namespace string) (map[string]Extension, error) { +func MergeExtensions(registrations []gardencorev1beta1.ControllerRegistration, extensions []gardencorev1beta1.Extension, namespace string) (map[string]extension.Extension, error) { var ( - typeToExtension = make(map[string]Extension) - requiredExtensions = make(map[string]Extension) + typeToExtension = make(map[string]extension.Extension) + requiredExtensions = make(map[string]extension.Extension) ) // Extensions enabled by default for all Shoot clusters. 
@@ -444,12 +440,12 @@ func MergeExtensions(registrations []gardencorev1beta1.ControllerRegistration, e continue } - timeout := ExtensionDefaultTimeout + timeout := extension.DefaultTimeout if res.ReconcileTimeout != nil { timeout = res.ReconcileTimeout.Duration } - typeToExtension[res.Type] = Extension{ + typeToExtension[res.Type] = extension.Extension{ Extension: extensionsv1alpha1.Extension{ ObjectMeta: metav1.ObjectMeta{ Name: res.Type, diff --git a/vendor/github.com/gardener/gardener/pkg/operation/shoot/types.go b/vendor/github.com/gardener/gardener/pkg/operation/shoot/types.go index 85cf19e00..bb23d0d86 100644 --- a/vendor/github.com/gardener/gardener/pkg/operation/shoot/types.go +++ b/vendor/github.com/gardener/gardener/pkg/operation/shoot/types.go @@ -17,7 +17,6 @@ package shoot import ( "context" "net" - "time" gardencorev1beta1 "github.com/gardener/gardener/pkg/apis/core/v1beta1" extensionsv1alpha1 "github.com/gardener/gardener/pkg/apis/extensions/v1alpha1" @@ -26,6 +25,7 @@ import ( "github.com/gardener/gardener/pkg/operation/botanist/controlplane/etcd" "github.com/gardener/gardener/pkg/operation/botanist/controlplane/kubecontrollermanager" "github.com/gardener/gardener/pkg/operation/botanist/controlplane/kubescheduler" + "github.com/gardener/gardener/pkg/operation/botanist/extensions/extension" "github.com/gardener/gardener/pkg/operation/botanist/systemcomponents/metricsserver" "github.com/gardener/gardener/pkg/operation/etcdencryption" "github.com/gardener/gardener/pkg/operation/garden" @@ -77,10 +77,8 @@ type Shoot struct { Components *Components OperatingSystemConfigsMap map[string]OperatingSystemConfigs - Extensions map[string]Extension + Extensions map[string]extension.Extension InfrastructureStatus []byte - ControlPlaneStatus []byte - MachineDeployments []extensionsv1alpha1.MachineDeployment ETCDEncryption *etcdencryption.EncryptionConfig @@ -109,12 +107,14 @@ type ControlPlane struct { // Extensions contains references to extension resources. 
type Extensions struct { + ContainerRuntime ExtensionContainerRuntime ControlPlane ExtensionControlPlane ControlPlaneExposure ExtensionControlPlane DNS *DNS + Extension extension.Interface Infrastructure ExtensionInfrastructure Network component.DeployMigrateWaiter - ContainerRuntime ExtensionContainerRuntime + Worker ExtensionWorker } // SystemComponents contains references to system components. @@ -139,7 +139,7 @@ type DNS struct { // ExtensionInfrastructure contains references to an Infrastructure extension deployer and its generated provider // status. type ExtensionInfrastructure interface { - component.DeployWaiter + component.DeployMigrateWaiter SetSSHPublicKey([]byte) ProviderStatus() *runtime.RawExtension NodesCIDR() *string @@ -158,6 +158,15 @@ type ExtensionContainerRuntime interface { DeleteStaleResources(ctx context.Context) error } +// ExtensionWorker contains references to a Worker extension deployer. +type ExtensionWorker interface { + component.DeployMigrateWaiter + SetSSHPublicKey([]byte) + SetInfrastructureProviderStatus(*runtime.RawExtension) + SetOperatingSystemConfigMaps(map[string]OperatingSystemConfigs) + MachineDeployments() []extensionsv1alpha1.MachineDeployment +} + // Networks contains pre-calculated subnets and IP address for various components. type Networks struct { // Pods subnet @@ -190,12 +199,6 @@ type OperatingSystemConfigData struct { Units []string } -// Extension contains information about the extension api resouce as well as configuration information. -type Extension struct { - extensionsv1alpha1.Extension - Timeout time.Duration -} - // IncompleteDNSConfigError is a custom error type. 
type IncompleteDNSConfigError struct{} diff --git a/vendor/github.com/gardener/gardener/pkg/seedadmission/constants.go b/vendor/github.com/gardener/gardener/pkg/seedadmission/constants.go new file mode 100644 index 000000000..c02ada58d --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/seedadmission/constants.go @@ -0,0 +1,24 @@ +// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package seedadmission + +const ( + // GardenerShootControlPlaneSchedulerName is the name of the scheduler used for + // shoot control plane pods. + GardenerShootControlPlaneSchedulerName = "gardener-shoot-controlplane-scheduler" + // GardenerShootControlPlaneSchedulerWebhookPath is the path of the webhook server + // that sets the "gardener-shoot-controlplane-scheduler" schedulerName for Pods. 
+ GardenerShootControlPlaneSchedulerWebhookPath = "/webhooks/default-pod-scheduler-name/" + GardenerShootControlPlaneSchedulerName +) diff --git a/vendor/github.com/gardener/gardener/pkg/seedadmission/extension_crds.go b/vendor/github.com/gardener/gardener/pkg/seedadmission/extension_crds.go new file mode 100644 index 000000000..586d9684f --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/seedadmission/extension_crds.go @@ -0,0 +1,129 @@ +// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package seedadmission + +import ( + "context" + "fmt" + "strconv" + "strings" + + extensionsv1alpha1 "github.com/gardener/gardener/pkg/apis/extensions/v1alpha1" + gardenlogger "github.com/gardener/gardener/pkg/logger" + "github.com/gardener/gardener/pkg/operation/common" + + "github.com/sirupsen/logrus" + admissionv1beta1 "k8s.io/api/admission/v1beta1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// ValidateExtensionDeletion validates whether it is allowed to delete extension CRDs or extension resources. +func ValidateExtensionDeletion(ctx context.Context, c client.Client, logger *logrus.Logger, request *admissionv1beta1.AdmissionRequest) error { + // Ignore all resources other than our expected ones + switch request.Resource { + case + metav1.GroupVersionResource{Group: apiextensionsv1beta1.SchemeGroupVersion.Group, Version: apiextensionsv1beta1.SchemeGroupVersion.Version, Resource: "customresourcedefinitions"}, + metav1.GroupVersionResource{Group: apiextensionsv1.SchemeGroupVersion.Group, Version: apiextensionsv1.SchemeGroupVersion.Version, Resource: "customresourcedefinitions"}, + + metav1.GroupVersionResource{Group: extensionsv1alpha1.SchemeGroupVersion.Group, Version: extensionsv1alpha1.SchemeGroupVersion.Version, Resource: "backupbuckets"}, + metav1.GroupVersionResource{Group: extensionsv1alpha1.SchemeGroupVersion.Group, Version: extensionsv1alpha1.SchemeGroupVersion.Version, Resource: "backupentries"}, + metav1.GroupVersionResource{Group: extensionsv1alpha1.SchemeGroupVersion.Group, Version: extensionsv1alpha1.SchemeGroupVersion.Version, Resource: "containerruntimes"}, + metav1.GroupVersionResource{Group: extensionsv1alpha1.SchemeGroupVersion.Group, Version: 
extensionsv1alpha1.SchemeGroupVersion.Version, Resource: "controlplanes"}, + metav1.GroupVersionResource{Group: extensionsv1alpha1.SchemeGroupVersion.Group, Version: extensionsv1alpha1.SchemeGroupVersion.Version, Resource: "extensions"}, + metav1.GroupVersionResource{Group: extensionsv1alpha1.SchemeGroupVersion.Group, Version: extensionsv1alpha1.SchemeGroupVersion.Version, Resource: "infrastructures"}, + metav1.GroupVersionResource{Group: extensionsv1alpha1.SchemeGroupVersion.Group, Version: extensionsv1alpha1.SchemeGroupVersion.Version, Resource: "networks"}, + metav1.GroupVersionResource{Group: extensionsv1alpha1.SchemeGroupVersion.Group, Version: extensionsv1alpha1.SchemeGroupVersion.Version, Resource: "operatingsystemconfigs"}, + metav1.GroupVersionResource{Group: extensionsv1alpha1.SchemeGroupVersion.Group, Version: extensionsv1alpha1.SchemeGroupVersion.Version, Resource: "workers"}: + default: + return nil + } + + var operation string + + obj, err := getRequestObject(ctx, c, request) + if err != nil { + return client.IgnoreNotFound(err) + } + + if strings.HasSuffix(obj.GetObjectKind().GroupVersionKind().Kind, "List") { + operation = "DELETECOLLECTION" + } else { + operation = "DELETE" + } + + entryLogger := gardenlogger. + NewFieldLogger(logger, "resource", fmt.Sprintf("%s/%s/%s", request.Kind.Group, request.Kind.Version, request.Kind.Kind)). + WithField("operation", operation). + WithField("namespace", request.Namespace) + + entryLogger.Info("Handling request") + return admitObjectDeletion(entryLogger, obj, request.Kind.Kind) +} + +// admitObjectDeletion checks if the object deletion is confirmed. If the given object is a list of objects then it +// performs the check for every single object. 
+func admitObjectDeletion(logger *logrus.Entry, obj runtime.Object, kind string) error { + if strings.HasSuffix(obj.GetObjectKind().GroupVersionKind().Kind, "List") { + return meta.EachListItem(obj, func(o runtime.Object) error { + return checkIfObjectDeletionIsConfirmed(logger, o, kind) + }) + } + return checkIfObjectDeletionIsConfirmed(logger, obj, kind) +} + +// checkIfObjectDeletionIsConfirmed checks if the object was annotated with the deletion confirmation. If it is a custom +// resource definition then it is only considered if the CRD has the deletion protection label. +func checkIfObjectDeletionIsConfirmed(logger *logrus.Entry, obj runtime.Object, kind string) error { + acc, err := meta.Accessor(obj) + if err != nil { + return err + } + + logger = logger.WithField("name", acc.GetName()) + + if kind == "CustomResourceDefinition" && !crdMustBeConsidered(logger, acc.GetLabels()) { + return nil + } + + if err := common.CheckIfDeletionIsConfirmed(acc); err != nil { + logger.Info("Deletion is not confirmed - preventing deletion") + return err + } + + logger.Info("Deletion is confirmed - allowing deletion") + return nil +} + +// TODO: This function can be removed once the minimum seed Kubernetes version is bumped to >= 1.15. In 1.15, webhook +// configurations may use object selectors, i.e., we can get rid of this custom filtering. 
+func crdMustBeConsidered(logger *logrus.Entry, labels map[string]string) bool {
+	val, ok := labels[common.GardenerDeletionProtected]
+	if !ok {
+		logger.Infof("Ignoring CRD because it has no %s label - allowing deletion", common.GardenerDeletionProtected)
+		return false
+	}
+
+	if confirmed, _ := strconv.ParseBool(val); !confirmed {
+		logger.Infof("Admitting CRD deletion because %s label value is not true - allowing deletion", common.GardenerDeletionProtected)
+		return false
+	}
+
+	return true
+}
diff --git a/vendor/github.com/gardener/gardener/pkg/seedadmission/utils.go b/vendor/github.com/gardener/gardener/pkg/seedadmission/utils.go
new file mode 100644
index 000000000..59a393594
--- /dev/null
+++ b/vendor/github.com/gardener/gardener/pkg/seedadmission/utils.go
@@ -0,0 +1,63 @@
+// Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package seedadmission + +import ( + "context" + "encoding/json" + + kutil "github.com/gardener/gardener/pkg/utils/kubernetes" + admissionv1beta1 "k8s.io/api/admission/v1beta1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func getRequestObject(ctx context.Context, c client.Client, request *admissionv1beta1.AdmissionRequest) (runtime.Object, error) { + // Older Kubernetes versions don't provide the object neither in OldObject nor in the Object field. In this case + // we have to look it up ourselves. + var ( + obj runtime.Object + err error + ) + + switch { + case request.OldObject.Raw != nil: + o := &unstructured.Unstructured{} + err = json.Unmarshal(request.OldObject.Raw, o) + obj = o + + case request.Object.Raw != nil: + o := &unstructured.Unstructured{} + err = json.Unmarshal(request.Object.Raw, o) + obj = o + + case request.Name == "": + o := &unstructured.UnstructuredList{} + o.SetAPIVersion(request.Kind.Group + "/" + request.Kind.Version) + o.SetKind(request.Kind.Kind + "List") + err = c.List(ctx, o, client.InNamespace(request.Namespace)) + obj = o + + default: + o := &unstructured.Unstructured{} + o.SetAPIVersion(request.Kind.Group + "/" + request.Kind.Version) + o.SetKind(request.Kind.Kind) + err = c.Get(ctx, kutil.Key(request.Namespace, request.Name), o) + obj = o + } + + return obj, err +} diff --git a/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/kubernetes.go b/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/kubernetes.go index acc470f75..289d9ca40 100644 --- a/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/kubernetes.go +++ b/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/kubernetes.go @@ -27,14 +27,18 @@ import ( "github.com/gardener/gardener/pkg/utils/retry" "github.com/sirupsen/logrus" + appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" 
"k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/duration" "k8s.io/apimachinery/pkg/util/intstr" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" ) // TruncateLabelValue truncates a string at 63 characters so it's suitable for a label value. @@ -317,7 +321,20 @@ func ReconcileServicePorts(existingPorts []corev1.ServicePort, desiredPorts []co } func buildEventsErrorMessage(events []corev1.Event) string { - sort.Sort(SortableEvents(events)) + sortByLastTimestamp := func(o1, o2 controllerutil.Object) bool { + obj1, ok1 := o1.(*corev1.Event) + obj2, ok2 := o2.(*corev1.Event) + + if !ok1 || !ok2 { + return false + } + + return obj1.LastTimestamp.Time.Before(obj2.LastTimestamp.Time) + } + + list := &corev1.EventList{Items: events} + SortBy(sortByLastTimestamp).Sort(list) + events = list.Items const eventsLimit = 2 if len(events) > eventsLimit { @@ -366,3 +383,164 @@ func translateMicroTimestampSince(timestamp metav1.MicroTime) string { return duration.HumanDuration(time.Since(timestamp.Time)) } + +// MergeOwnerReferences merges the newReferences with the list of existing references. +func MergeOwnerReferences(references []metav1.OwnerReference, newReferences ...metav1.OwnerReference) []metav1.OwnerReference { + uids := make(map[types.UID]struct{}) + for _, reference := range references { + uids[reference.UID] = struct{}{} + } + + for _, newReference := range newReferences { + if _, ok := uids[newReference.UID]; !ok { + references = append(references, newReference) + } + } + + return references +} + +// OwnedBy checks if the given object's owner reference contains an entry with the provided attributes. 
+func OwnedBy(obj runtime.Object, apiVersion, kind, name string, uid types.UID) bool {
+	acc, err := meta.Accessor(obj)
+	if err != nil {
+		return false
+	}
+
+	for _, ownerReference := range acc.GetOwnerReferences() {
+		if ownerReference.APIVersion == apiVersion && ownerReference.Kind == kind &&
+			ownerReference.Name == name && ownerReference.UID == uid {
+			return true
+		}
+	}
+
+	return false
+}
+
+// NewestObject returns the most recently created object based on the provided list object type. If a filter function
+// is provided then it will be applied for each object right after listing all objects. If no object remains then nil
+// is returned. The Items field in the list object will be populated with the result returned from the server after
+// applying the filter function (if provided).
+func NewestObject(ctx context.Context, c client.Client, listObj runtime.Object, filterFn func(runtime.Object) bool, listOpts ...client.ListOption) (runtime.Object, error) {
+	if !meta.IsListType(listObj) {
+		return nil, fmt.Errorf("provided is not a List type")
+	}
+
+	if err := c.List(ctx, listObj, listOpts...); err != nil {
+		return nil, err
+	}
+
+	if filterFn != nil {
+		var items []runtime.Object
+
+		if err := meta.EachListItem(listObj, func(obj runtime.Object) error {
+			if filterFn(obj) {
+				items = append(items, obj)
+			}
+			return nil
+		}); err != nil {
+			return nil, err
+		}
+
+		if err := meta.SetList(listObj, items); err != nil {
+			return nil, err
+		}
+	}
+
+	if meta.LenList(listObj) == 0 {
+		return nil, nil
+	}
+
+	ByCreationTimestamp().Sort(listObj)
+
+	items, err := meta.ExtractList(listObj)
+	if err != nil {
+		return nil, err
+	}
+
+	return items[meta.LenList(listObj)-1], nil
+}
+
+// NewestPodForDeployment returns the most recently created Pod object for the given deployment.
+func NewestPodForDeployment(ctx context.Context, c client.Client, deployment *appsv1.Deployment) (*corev1.Pod, error) { + listOpts := []client.ListOption{client.InNamespace(deployment.Namespace)} + if deployment.Spec.Selector != nil { + listOpts = append(listOpts, client.MatchingLabels(deployment.Spec.Selector.MatchLabels)) + } + + replicaSet, err := NewestObject( + ctx, + c, + &appsv1.ReplicaSetList{}, + func(obj runtime.Object) bool { + return OwnedBy(obj, appsv1.SchemeGroupVersion.String(), "Deployment", deployment.Name, deployment.UID) + }, + listOpts..., + ) + if err != nil { + return nil, err + } + if replicaSet == nil { + return nil, nil + } + + newestReplicaSet, ok := replicaSet.(*appsv1.ReplicaSet) + if !ok { + return nil, fmt.Errorf("object is not of type *appsv1.ReplicaSet but %T", replicaSet) + } + + pod, err := NewestObject( + ctx, + c, + &corev1.PodList{}, + func(obj runtime.Object) bool { + return OwnedBy(obj, appsv1.SchemeGroupVersion.String(), "ReplicaSet", newestReplicaSet.Name, newestReplicaSet.UID) + }, + listOpts..., + ) + if err != nil { + return nil, err + } + if pod == nil { + return nil, nil + } + + newestPod, ok := pod.(*corev1.Pod) + if !ok { + return nil, fmt.Errorf("object is not of type *corev1.Pod but %T", pod) + } + + return newestPod, nil +} + +// MostRecentCompleteLogs returns the logs of the pod/container in case it is not running. If the pod/container is +// running then the logs of the previous pod/container are being returned. 
+func MostRecentCompleteLogs( + ctx context.Context, + podInterface corev1client.PodInterface, + pod *corev1.Pod, + containerName string, + tailLines *int64, +) ( + string, + error, +) { + previousLogs := false + for _, containerStatus := range pod.Status.ContainerStatuses { + if containerName == "" || containerStatus.Name == containerName { + previousLogs = containerStatus.State.Running != nil + break + } + } + + logs, err := kubernetes.GetPodLogs(ctx, podInterface, pod.Name, &corev1.PodLogOptions{ + Container: containerName, + TailLines: tailLines, + Previous: previousLogs, + }) + if err != nil { + return "", err + } + + return string(logs), nil +} diff --git a/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/sorter.go b/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/sorter.go new file mode 100644 index 000000000..2d67a2005 --- /dev/null +++ b/vendor/github.com/gardener/gardener/pkg/utils/kubernetes/sorter.go @@ -0,0 +1,79 @@ +// Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package kubernetes + +import ( + "sort" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +// ByName returns a comparison function for sorting by name. 
+func ByName() SortBy { + return func(o1, o2 controllerutil.Object) bool { + return o1.GetName() < o2.GetName() + } +} + +// ByCreationTimestamp returns a comparison function for sorting by creation timestamp. +func ByCreationTimestamp() SortBy { + return func(o1, o2 controllerutil.Object) bool { + return o1.GetCreationTimestamp().Time.Before(o2.GetCreationTimestamp().Time) + } +} + +// SortBy the type of a "less" function that defines the ordering of its object arguments. +type SortBy func(o1, o2 controllerutil.Object) bool + +// Sort sorts the items in the provided list objects according to the sort-by function. +func (sortBy SortBy) Sort(objList runtime.Object) { + if !meta.IsListType(objList) { + panic("provided is not a list type") + } + + items, err := meta.ExtractList(objList) + if err != nil { + panic(err) + } + + ps := &objectSorter{objects: items, compareFn: sortBy} + sort.Sort(ps) + + if err := meta.SetList(objList, ps.objects); err != nil { + panic(err) + } +} + +type objectSorter struct { + objects []runtime.Object + compareFn SortBy +} + +func (s *objectSorter) Len() int { + return len(s.objects) +} + +func (s *objectSorter) Swap(i, j int) { + s.objects[i], s.objects[j] = s.objects[j], s.objects[i] +} + +func (s *objectSorter) Less(i, j int) bool { + return s.compareFn( + s.objects[i].(controllerutil.Object), + s.objects[j].(controllerutil.Object), + ) +} diff --git a/vendor/github.com/gardener/gardener/pkg/utils/retry/retry.go b/vendor/github.com/gardener/gardener/pkg/utils/retry/retry.go index b0ef8fe6b..75502758b 100644 --- a/vendor/github.com/gardener/gardener/pkg/utils/retry/retry.go +++ b/vendor/github.com/gardener/gardener/pkg/utils/retry/retry.go @@ -107,36 +107,37 @@ func MinorOrSevereError(retryCountUntilSevere, threshold int, err error) (bool, return MinorError(err) } -type retryError struct { +// Error is an error that occurred during a retry operation. +type Error struct { ctxError error err error } // Cause implements Causer. 
-func (r *retryError) Cause() error { - if r.err != nil { - return r.err +func (e *Error) Cause() error { + if e.err != nil { + return e.err } - return r.ctxError + return e.ctxError } // Unwrap implements the Unwrap function // https://golang.org/pkg/errors/#Unwrap -func (r *retryError) Unwrap() error { - return r.err +func (e *Error) Unwrap() error { + return e.err } // Error implements error. -func (r *retryError) Error() string { - if r.err != nil { - return fmt.Sprintf("retry failed with %v, last error: %v", r.ctxError, r.err) +func (e *Error) Error() string { + if e.err != nil { + return fmt.Sprintf("retry failed with %v, last error: %v", e.ctxError, e.err) } - return fmt.Sprintf("retry failed with %v", r.ctxError) + return fmt.Sprintf("retry failed with %v", e.ctxError) } -// NewRetryError returns a new error with the given context error and error. The non-context error is optional. -func NewRetryError(ctxError, err error) error { - return &retryError{ctxError, err} +// NewError returns a new error with the given context error and error. The non-context error is optional. +func NewError(ctxError, err error) error { + return &Error{ctxError, err} } // UntilFor keeps retrying the given Func until it either errors severely or the context expires. 
@@ -166,12 +167,12 @@ func UntilFor(ctx context.Context, waitFunc WaitFunc, agg ErrorAggregator, f Fun case <-waitDone: select { case <-ctxDone: - return NewRetryError(ctx.Err(), agg.Error()) + return NewError(ctx.Err(), agg.Error()) default: return nil } case <-ctxDone: - return NewRetryError(ctx.Err(), agg.Error()) + return NewError(ctx.Err(), agg.Error()) } }(); err != nil { return err diff --git a/vendor/github.com/gardener/gardener/pkg/utils/secrets/basic_auth.go b/vendor/github.com/gardener/gardener/pkg/utils/secrets/basic_auth.go index 06d88bc9a..8a60b984e 100644 --- a/vendor/github.com/gardener/gardener/pkg/utils/secrets/basic_auth.go +++ b/vendor/github.com/gardener/gardener/pkg/utils/secrets/basic_auth.go @@ -21,8 +21,6 @@ import ( "github.com/gardener/gardener/pkg/utils" "github.com/gardener/gardener/pkg/utils/infodata" "k8s.io/apiserver/pkg/authentication/user" - - "golang.org/x/crypto/bcrypt" ) type formatType string @@ -40,8 +38,6 @@ const ( DataKeyUserName = "username" // DataKeyPassword is the key in a secret data holding the password. DataKeyPassword = "password" - // DataKeyPasswordBcryptHash is the key in a secret data holding the bcrypt hash of the password. - DataKeyPasswordBcryptHash = "bcryptPasswordHash" ) // BasicAuthSecretConfig contains the specification for a to-be-generated basic authentication secret. @@ -49,9 +45,8 @@ type BasicAuthSecretConfig struct { Name string Format formatType - Username string - PasswordLength int - BcryptPasswordHashRequest bool + Username string + PasswordLength int } // BasicAuth contains the username, the password, optionally hash of the password and the format for serializing the basic authentication @@ -59,9 +54,8 @@ type BasicAuth struct { Name string Format formatType - Username string - Password string - BcryptPasswordHash string + Username string + Password string } // GetName returns the name of the secret. 
@@ -134,15 +128,6 @@ func (s *BasicAuthSecretConfig) generateWithPassword(password string) (*BasicAut Password: password, } - if s.BcryptPasswordHashRequest { - hashedPassword, err := bcrypt.GenerateFromPassword([]byte(password), 16) - if err != nil { - return nil, err - } - - basicAuth.BcryptPasswordHash = string(hashedPassword) - } - return basicAuth, nil } @@ -155,10 +140,6 @@ func (b *BasicAuth) SecretData() map[string][]byte { data[DataKeyUserName] = []byte(b.Username) data[DataKeyPassword] = []byte(b.Password) - if b.BcryptPasswordHash != "" { - data[DataKeyPasswordBcryptHash] = []byte(b.BcryptPasswordHash) - } - fallthrough case BasicAuthFormatCSV: diff --git a/vendor/github.com/gardener/gardener/pkg/utils/secrets/certificates.go b/vendor/github.com/gardener/gardener/pkg/utils/secrets/certificates.go index 462a6a01d..0d2fd550e 100644 --- a/vendor/github.com/gardener/gardener/pkg/utils/secrets/certificates.go +++ b/vendor/github.com/gardener/gardener/pkg/utils/secrets/certificates.go @@ -265,9 +265,9 @@ func LoadCertificate(name string, privateKeyPEM, certificatePEM []byte) (*Certif } // LoadCAFromSecret loads a CA certificate from an existing Kubernetes secret object. It returns the secret, the Certificate and an error. 
-func LoadCAFromSecret(k8sClient client.Client, namespace, name string) (*corev1.Secret, *Certificate, error) { +func LoadCAFromSecret(ctx context.Context, k8sClient client.Client, namespace, name string) (*corev1.Secret, *Certificate, error) { secret := &corev1.Secret{} - if err := k8sClient.Get(context.TODO(), kutil.Key(namespace, name), secret); err != nil { + if err := k8sClient.Get(ctx, kutil.Key(namespace, name), secret); err != nil { return nil, nil, err } @@ -335,7 +335,7 @@ func signCertificate(certificateTemplate *x509.Certificate, privateKey *rsa.Priv return utils.EncodeCertificate(certificate), nil } -func generateCA(k8sClusterClient kubernetes.Interface, config *CertificateSecretConfig, namespace string) (*corev1.Secret, *Certificate, error) { +func generateCA(ctx context.Context, k8sClusterClient kubernetes.Interface, config *CertificateSecretConfig, namespace string) (*corev1.Secret, *Certificate, error) { certificate, err := config.GenerateCertificate() if err != nil { return nil, nil, err @@ -350,7 +350,7 @@ func generateCA(k8sClusterClient kubernetes.Interface, config *CertificateSecret Data: certificate.SecretData(), } - if err := k8sClusterClient.Client().Create(context.TODO(), secret); err != nil { + if err := k8sClusterClient.Client().Create(ctx, secret); err != nil { return nil, nil, err } return secret, certificate, nil @@ -367,7 +367,7 @@ func loadCA(name string, existingSecret *corev1.Secret) (*corev1.Secret, *Certif // GenerateCertificateAuthorities get a map of wanted certificates and check If they exist in the existingSecretsMap based on the keys in the map. If they exist it get only the certificate from the corresponding // existing secret and makes a certificate DataInterface from the existing secret. If there is no existing secret contaning the wanted certificate, we make one certificate and with it we deploy in K8s cluster // a secret with that certificate and then return the newly existing secret. 
The function returns a map of secrets contaning the wanted CA, a map with the wanted CA certificate and an error. -func GenerateCertificateAuthorities(k8sClusterClient kubernetes.Interface, existingSecretsMap map[string]*corev1.Secret, wantedCertificateAuthorities map[string]*CertificateSecretConfig, namespace string) (map[string]*corev1.Secret, map[string]*Certificate, error) { +func GenerateCertificateAuthorities(ctx context.Context, k8sClusterClient kubernetes.Interface, existingSecretsMap map[string]*corev1.Secret, wantedCertificateAuthorities map[string]*CertificateSecretConfig, namespace string) (map[string]*corev1.Secret, map[string]*Certificate, error) { type caOutput struct { secret *corev1.Secret certificate *Certificate @@ -388,7 +388,7 @@ func GenerateCertificateAuthorities(k8sClusterClient kubernetes.Interface, exist if existingSecret, ok := existingSecretsMap[name]; !ok { go func(config *CertificateSecretConfig) { defer wg.Done() - secret, certificate, err := generateCA(k8sClusterClient, config, namespace) + secret, certificate, err := generateCA(ctx, k8sClusterClient, config, namespace) results <- &caOutput{secret, certificate, err} }(config) } else { diff --git a/vendor/github.com/gardener/gardener/pkg/utils/secrets/secrets.go b/vendor/github.com/gardener/gardener/pkg/utils/secrets/secrets.go index a7cd7d083..1f12d1e52 100644 --- a/vendor/github.com/gardener/gardener/pkg/utils/secrets/secrets.go +++ b/vendor/github.com/gardener/gardener/pkg/utils/secrets/secrets.go @@ -45,7 +45,10 @@ func (s *Secrets) Deploy( cs kubernetes.Interface, gcs gardenerkubernetes.Interface, namespace string, -) (map[string]*corev1.Secret, error) { +) ( + map[string]*corev1.Secret, + error, +) { // Get existing secrets in the namespace existingSecrets, err := getSecrets(ctx, cs, namespace) if err != nil { @@ -53,7 +56,7 @@ func (s *Secrets) Deploy( } // Generate CAs - _, cas, err := GenerateCertificateAuthorities(gcs, existingSecrets, s.CertificateSecretConfigs, namespace) 
+ _, cas, err := GenerateCertificateAuthorities(ctx, gcs, existingSecrets, s.CertificateSecretConfigs, namespace) if err != nil { return nil, errors.Wrapf(err, "could not generate CA secrets in namespace '%s'", namespace) } diff --git a/vendor/github.com/gardener/gardener/test/framework/utils.go b/vendor/github.com/gardener/gardener/test/framework/utils.go index 5e00c7b23..43c61c7d5 100644 --- a/vendor/github.com/gardener/gardener/test/framework/utils.go +++ b/vendor/github.com/gardener/gardener/test/framework/utils.go @@ -158,26 +158,26 @@ func ParseFileAsWorkers(filepath string) ([]gardencorev1beta1.Worker, error) { } // TextValidation is a map of regular expression to description -// that is used to validate texts based on white- or blacklisted regexps. +// that is used to validate texts based on allowed or denied regexps. type TextValidation map[string]string -// ValidateAsWhitelist validates that all whitelisted regular expressions +// ValidateAsAllowlist validates that all allowed regular expressions // are in the given text. -func (v *TextValidation) ValidateAsWhitelist(text []byte) error { +func (v *TextValidation) ValidateAsAllowlist(text []byte) error { return v.validate(text, func(matches [][]byte) error { if len(matches) == 0 { - return errors.New("whitelisted RegExp not found") + return errors.New("allowed RegExp not found") } return nil }) } -// ValidateAsBlacklist validates that no blacklisted regular expressions +// ValidateAsDenylist validates that no denied regular expressions // are in the given text. 
-func (v *TextValidation) ValidateAsBlacklist(text []byte) error { +func (v *TextValidation) ValidateAsDenylist(text []byte) error { return v.validate(text, func(matches [][]byte) error { if len(matches) != 0 { - return errors.New("blacklisted RegExp found") + return errors.New("denied RegExp found") } return nil }) diff --git a/vendor/github.com/gardener/gardener/third_party/kube-scheduler/v18/v1alpha2/doc.go b/vendor/github.com/gardener/gardener/third_party/kube-scheduler/v18/v1alpha2/doc.go new file mode 100644 index 000000000..5e2589c27 --- /dev/null +++ b/vendor/github.com/gardener/gardener/third_party/kube-scheduler/v18/v1alpha2/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true +// +groupName=kubescheduler.config.k8s.io + +package v1alpha2 // import "k8s.io/kube-scheduler/config/v1alpha2" diff --git a/vendor/github.com/gardener/gardener/third_party/kube-scheduler/v18/v1alpha2/register.go b/vendor/github.com/gardener/gardener/third_party/kube-scheduler/v18/v1alpha2/register.go new file mode 100644 index 000000000..d5715c4e4 --- /dev/null +++ b/vendor/github.com/gardener/gardener/third_party/kube-scheduler/v18/v1alpha2/register.go @@ -0,0 +1,43 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha2 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name used in this package +const GroupName = "kubescheduler.config.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha2"} + +var ( + // SchemeBuilder is the scheme builder with scheme init functions to run for this API package + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme is a global function that registers this API group & version to a scheme + AddToScheme = SchemeBuilder.AddToScheme +) + +// addKnownTypes registers known types to the given scheme +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &KubeSchedulerConfiguration{}, + ) + return nil +} diff --git a/vendor/github.com/gardener/gardener/third_party/kube-scheduler/v18/v1alpha2/types.go b/vendor/github.com/gardener/gardener/third_party/kube-scheduler/v18/v1alpha2/types.go new file mode 100644 index 000000000..2474b716a --- /dev/null +++ b/vendor/github.com/gardener/gardener/third_party/kube-scheduler/v18/v1alpha2/types.go @@ -0,0 +1,204 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + componentbaseconfigv1alpha1 "k8s.io/component-base/config/v1alpha1" + v1 "k8s.io/kube-scheduler/config/v1" +) + +const ( + // SchedulerDefaultLockObjectNamespace defines default scheduler lock object namespace ("kube-system") + SchedulerDefaultLockObjectNamespace string = metav1.NamespaceSystem + + // SchedulerDefaultLockObjectName defines default scheduler lock object name ("kube-scheduler") + SchedulerDefaultLockObjectName = "kube-scheduler" + + // SchedulerDefaultProviderName defines the default provider names + SchedulerDefaultProviderName = "DefaultProvider" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// KubeSchedulerConfiguration configures a scheduler +type KubeSchedulerConfiguration struct { + metav1.TypeMeta `json:",inline"` + + // LeaderElection defines the configuration of leader election client. + LeaderElection KubeSchedulerLeaderElectionConfiguration `json:"leaderElection"` + + // ClientConnection specifies the kubeconfig file and client connection + // settings for the proxy server to use when communicating with the apiserver. 
+ ClientConnection componentbaseconfigv1alpha1.ClientConnectionConfiguration `json:"clientConnection"` + // HealthzBindAddress is the IP address and port for the health check server to serve on, + // defaulting to 0.0.0.0:10251 + HealthzBindAddress *string `json:"healthzBindAddress,omitempty"` + // MetricsBindAddress is the IP address and port for the metrics server to + // serve on, defaulting to 0.0.0.0:10251. + MetricsBindAddress *string `json:"metricsBindAddress,omitempty"` + + // DebuggingConfiguration holds configuration for Debugging related features + // TODO: We might wanna make this a substruct like Debugging componentbaseconfigv1alpha1.DebuggingConfiguration + componentbaseconfigv1alpha1.DebuggingConfiguration `json:",inline"` + + // DisablePreemption disables the pod preemption feature. + DisablePreemption *bool `json:"disablePreemption,omitempty"` + + // PercentageOfNodeToScore is the percentage of all nodes that once found feasible + // for running a pod, the scheduler stops its search for more feasible nodes in + // the cluster. This helps improve scheduler's performance. Scheduler always tries to find + // at least "minFeasibleNodesToFind" feasible nodes no matter what the value of this flag is. + // Example: if the cluster size is 500 nodes and the value of this flag is 30, + // then scheduler stops finding further feasible nodes once it finds 150 feasible ones. + // When the value is 0, default percentage (5%--50% based on the size of the cluster) of the + // nodes will be scored. + PercentageOfNodesToScore *int32 `json:"percentageOfNodesToScore,omitempty"` + + // Duration to wait for a binding operation to complete before timing out + // Value must be non-negative integer. The value zero indicates no waiting. + // If this value is nil, the default value will be used. + BindTimeoutSeconds *int64 `json:"bindTimeoutSeconds"` + + // PodInitialBackoffSeconds is the initial backoff for unschedulable pods. + // If specified, it must be greater than 0. 
If this value is null, the default value (1s) + // will be used. + PodInitialBackoffSeconds *int64 `json:"podInitialBackoffSeconds"` + + // PodMaxBackoffSeconds is the max backoff for unschedulable pods. + // If specified, it must be greater than podInitialBackoffSeconds. If this value is null, + // the default value (10s) will be used. + PodMaxBackoffSeconds *int64 `json:"podMaxBackoffSeconds"` + + // Profiles are scheduling profiles that kube-scheduler supports. Pods can + // choose to be scheduled under a particular profile by setting its associated + // scheduler name. Pods that don't specify any scheduler name are scheduled + // with the "default-scheduler" profile, if present here. + // +listType=map + // +listMapKey=schedulerName + Profiles []KubeSchedulerProfile `json:"profiles"` + + // Extenders are the list of scheduler extenders, each holding the values of how to communicate + // with the extender. These extenders are shared by all scheduler profiles. + // +listType=set + Extenders []v1.Extender `json:"extenders"` +} + +// KubeSchedulerProfile is a scheduling profile. +type KubeSchedulerProfile struct { + // SchedulerName is the name of the scheduler associated to this profile. + // If SchedulerName matches with the pod's "spec.schedulerName", then the pod + // is scheduled with this profile. + SchedulerName *string `json:"schedulerName,omitempty"` + + // Plugins specify the set of plugins that should be enabled or disabled. + // Enabled plugins are the ones that should be enabled in addition to the + // default plugins. Disabled plugins are any of the default plugins that + // should be disabled. + // When no enabled or disabled plugin is specified for an extension point, + // default plugins for that extension point will be used if there is any. + // If a QueueSort plugin is specified, the same QueueSort Plugin and + // PluginConfig must be specified for all profiles. 
+ Plugins *Plugins `json:"plugins,omitempty"` + + // PluginConfig is an optional set of custom plugin arguments for each plugin. + // Omitting config args for a plugin is equivalent to using the default config + // for that plugin. + // +listType=map + // +listMapKey=name + PluginConfig []PluginConfig `json:"pluginConfig,omitempty"` +} + +// KubeSchedulerLeaderElectionConfiguration expands LeaderElectionConfiguration +// to include scheduler specific configuration. +type KubeSchedulerLeaderElectionConfiguration struct { + componentbaseconfigv1alpha1.LeaderElectionConfiguration `json:",inline"` +} + +// Plugins include multiple extension points. When specified, the list of plugins for +// a particular extension point are the only ones enabled. If an extension point is +// omitted from the config, then the default set of plugins is used for that extension point. +// Enabled plugins are called in the order specified here, after default plugins. If they need to +// be invoked before default plugins, default plugins must be disabled and re-enabled here in desired order. +type Plugins struct { + // QueueSort is a list of plugins that should be invoked when sorting pods in the scheduling queue. + QueueSort *PluginSet `json:"queueSort,omitempty"` + + // PreFilter is a list of plugins that should be invoked at "PreFilter" extension point of the scheduling framework. + PreFilter *PluginSet `json:"preFilter,omitempty"` + + // Filter is a list of plugins that should be invoked when filtering out nodes that cannot run the Pod. + Filter *PluginSet `json:"filter,omitempty"` + + // PreScore is a list of plugins that are invoked before scoring. + PreScore *PluginSet `json:"preScore,omitempty"` + + // Score is a list of plugins that should be invoked when ranking nodes that have passed the filtering phase. + Score *PluginSet `json:"score,omitempty"` + + // Reserve is a list of plugins invoked when reserving a node to run the pod. 
+ Reserve *PluginSet `json:"reserve,omitempty"` + + // Permit is a list of plugins that control binding of a Pod. These plugins can prevent or delay binding of a Pod. + Permit *PluginSet `json:"permit,omitempty"` + + // PreBind is a list of plugins that should be invoked before a pod is bound. + PreBind *PluginSet `json:"preBind,omitempty"` + + // Bind is a list of plugins that should be invoked at "Bind" extension point of the scheduling framework. + // The scheduler call these plugins in order. Scheduler skips the rest of these plugins as soon as one returns success. + Bind *PluginSet `json:"bind,omitempty"` + + // PostBind is a list of plugins that should be invoked after a pod is successfully bound. + PostBind *PluginSet `json:"postBind,omitempty"` + + // Unreserve is a list of plugins invoked when a pod that was previously reserved is rejected in a later phase. + Unreserve *PluginSet `json:"unreserve,omitempty"` +} + +// PluginSet specifies enabled and disabled plugins for an extension point. +// If an array is empty, missing, or nil, default plugins at that extension point will be used. +type PluginSet struct { + // Enabled specifies plugins that should be enabled in addition to default plugins. + // These are called after default plugins and in the same order specified here. + // +listType=atomic + Enabled []Plugin `json:"enabled,omitempty"` + // Disabled specifies default plugins that should be disabled. + // When all default plugins need to be disabled, an array containing only one "*" should be provided. + // +listType=map + // +listMapKey=name + Disabled []Plugin `json:"disabled,omitempty"` +} + +// Plugin specifies a plugin name and its weight when applicable. Weight is used only for Score plugins. +type Plugin struct { + // Name defines the name of plugin + Name string `json:"name"` + // Weight defines the weight of plugin, only used for Score plugins. 
+ Weight *int32 `json:"weight,omitempty"` +} + +// PluginConfig specifies arguments that should be passed to a plugin at the time of initialization. +// A plugin that is invoked at multiple extension points is initialized once. Args can have arbitrary structure. +// It is up to the plugin to process these Args. +type PluginConfig struct { + // Name defines the name of plugin being configured + Name string `json:"name"` + // Args defines the arguments passed to the plugins at the time of initialization. Args can have arbitrary structure. + Args runtime.Unknown `json:"args,omitempty"` +} diff --git a/vendor/github.com/gardener/gardener/third_party/kube-scheduler/v18/v1alpha2/zz_generated.deepcopy.go b/vendor/github.com/gardener/gardener/third_party/kube-scheduler/v18/v1alpha2/zz_generated.deepcopy.go new file mode 100644 index 000000000..afd67b9d3 --- /dev/null +++ b/vendor/github.com/gardener/gardener/third_party/kube-scheduler/v18/v1alpha2/zz_generated.deepcopy.go @@ -0,0 +1,292 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha2 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" + v1 "k8s.io/kube-scheduler/config/v1" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KubeSchedulerConfiguration) DeepCopyInto(out *KubeSchedulerConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.LeaderElection.DeepCopyInto(&out.LeaderElection) + out.ClientConnection = in.ClientConnection + if in.HealthzBindAddress != nil { + in, out := &in.HealthzBindAddress, &out.HealthzBindAddress + *out = new(string) + **out = **in + } + if in.MetricsBindAddress != nil { + in, out := &in.MetricsBindAddress, &out.MetricsBindAddress + *out = new(string) + **out = **in + } + in.DebuggingConfiguration.DeepCopyInto(&out.DebuggingConfiguration) + if in.DisablePreemption != nil { + in, out := &in.DisablePreemption, &out.DisablePreemption + *out = new(bool) + **out = **in + } + if in.PercentageOfNodesToScore != nil { + in, out := &in.PercentageOfNodesToScore, &out.PercentageOfNodesToScore + *out = new(int32) + **out = **in + } + if in.BindTimeoutSeconds != nil { + in, out := &in.BindTimeoutSeconds, &out.BindTimeoutSeconds + *out = new(int64) + **out = **in + } + if in.PodInitialBackoffSeconds != nil { + in, out := &in.PodInitialBackoffSeconds, &out.PodInitialBackoffSeconds + *out = new(int64) + **out = **in + } + if in.PodMaxBackoffSeconds != nil { + in, out := &in.PodMaxBackoffSeconds, &out.PodMaxBackoffSeconds + *out = new(int64) + **out = **in + } + if in.Profiles != nil { + in, out := &in.Profiles, &out.Profiles + *out = make([]KubeSchedulerProfile, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Extenders != nil { + in, out := &in.Extenders, &out.Extenders + *out = make([]v1.Extender, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeSchedulerConfiguration. 
+func (in *KubeSchedulerConfiguration) DeepCopy() *KubeSchedulerConfiguration { + if in == nil { + return nil + } + out := new(KubeSchedulerConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KubeSchedulerConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeSchedulerLeaderElectionConfiguration) DeepCopyInto(out *KubeSchedulerLeaderElectionConfiguration) { + *out = *in + in.LeaderElectionConfiguration.DeepCopyInto(&out.LeaderElectionConfiguration) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeSchedulerLeaderElectionConfiguration. +func (in *KubeSchedulerLeaderElectionConfiguration) DeepCopy() *KubeSchedulerLeaderElectionConfiguration { + if in == nil { + return nil + } + out := new(KubeSchedulerLeaderElectionConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeSchedulerProfile) DeepCopyInto(out *KubeSchedulerProfile) { + *out = *in + if in.SchedulerName != nil { + in, out := &in.SchedulerName, &out.SchedulerName + *out = new(string) + **out = **in + } + if in.Plugins != nil { + in, out := &in.Plugins, &out.Plugins + *out = new(Plugins) + (*in).DeepCopyInto(*out) + } + if in.PluginConfig != nil { + in, out := &in.PluginConfig, &out.PluginConfig + *out = make([]PluginConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeSchedulerProfile. 
+func (in *KubeSchedulerProfile) DeepCopy() *KubeSchedulerProfile { + if in == nil { + return nil + } + out := new(KubeSchedulerProfile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Plugin) DeepCopyInto(out *Plugin) { + *out = *in + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Plugin. +func (in *Plugin) DeepCopy() *Plugin { + if in == nil { + return nil + } + out := new(Plugin) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PluginConfig) DeepCopyInto(out *PluginConfig) { + *out = *in + in.Args.DeepCopyInto(&out.Args) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginConfig. +func (in *PluginConfig) DeepCopy() *PluginConfig { + if in == nil { + return nil + } + out := new(PluginConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PluginSet) DeepCopyInto(out *PluginSet) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = make([]Plugin, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = make([]Plugin, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginSet. 
+func (in *PluginSet) DeepCopy() *PluginSet { + if in == nil { + return nil + } + out := new(PluginSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Plugins) DeepCopyInto(out *Plugins) { + *out = *in + if in.QueueSort != nil { + in, out := &in.QueueSort, &out.QueueSort + *out = new(PluginSet) + (*in).DeepCopyInto(*out) + } + if in.PreFilter != nil { + in, out := &in.PreFilter, &out.PreFilter + *out = new(PluginSet) + (*in).DeepCopyInto(*out) + } + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(PluginSet) + (*in).DeepCopyInto(*out) + } + if in.PreScore != nil { + in, out := &in.PreScore, &out.PreScore + *out = new(PluginSet) + (*in).DeepCopyInto(*out) + } + if in.Score != nil { + in, out := &in.Score, &out.Score + *out = new(PluginSet) + (*in).DeepCopyInto(*out) + } + if in.Reserve != nil { + in, out := &in.Reserve, &out.Reserve + *out = new(PluginSet) + (*in).DeepCopyInto(*out) + } + if in.Permit != nil { + in, out := &in.Permit, &out.Permit + *out = new(PluginSet) + (*in).DeepCopyInto(*out) + } + if in.PreBind != nil { + in, out := &in.PreBind, &out.PreBind + *out = new(PluginSet) + (*in).DeepCopyInto(*out) + } + if in.Bind != nil { + in, out := &in.Bind, &out.Bind + *out = new(PluginSet) + (*in).DeepCopyInto(*out) + } + if in.PostBind != nil { + in, out := &in.PostBind, &out.PostBind + *out = new(PluginSet) + (*in).DeepCopyInto(*out) + } + if in.Unreserve != nil { + in, out := &in.Unreserve, &out.Unreserve + *out = new(PluginSet) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Plugins. 
+func (in *Plugins) DeepCopy() *Plugins { + if in == nil { + return nil + } + out := new(Plugins) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/gardener/gardener/third_party/kube-scheduler/v19/v1beta1/doc.go b/vendor/github.com/gardener/gardener/third_party/kube-scheduler/v19/v1beta1/doc.go new file mode 100644 index 000000000..c3bac316d --- /dev/null +++ b/vendor/github.com/gardener/gardener/third_party/kube-scheduler/v19/v1beta1/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true +// +groupName=kubescheduler.config.k8s.io + +package v1beta1 // import "k8s.io/kube-scheduler/config/v1beta1" diff --git a/vendor/github.com/gardener/gardener/third_party/kube-scheduler/v19/v1beta1/register.go b/vendor/github.com/gardener/gardener/third_party/kube-scheduler/v19/v1beta1/register.go new file mode 100644 index 000000000..98a8f7b22 --- /dev/null +++ b/vendor/github.com/gardener/gardener/third_party/kube-scheduler/v19/v1beta1/register.go @@ -0,0 +1,52 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name used in this package +const GroupName = "kubescheduler.config.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +var ( + // SchemeBuilder is the scheme builder with scheme init functions to run for this API package + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme is a global function that registers this API group & version to a scheme + AddToScheme = SchemeBuilder.AddToScheme +) + +// addKnownTypes registers known types to the given scheme +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &KubeSchedulerConfiguration{}, + &InterPodAffinityArgs{}, + &NodeLabelArgs{}, + &NodeResourcesFitArgs{}, + &PodTopologySpreadArgs{}, + &RequestedToCapacityRatioArgs{}, + &ServiceAffinityArgs{}, + &NodeResourcesLeastAllocatedArgs{}, + &NodeResourcesMostAllocatedArgs{}, + &VolumeBindingArgs{}, + ) + return nil +} diff --git a/vendor/github.com/gardener/gardener/third_party/kube-scheduler/v19/v1beta1/types.go b/vendor/github.com/gardener/gardener/third_party/kube-scheduler/v19/v1beta1/types.go new file mode 100644 index 000000000..87965c734 --- /dev/null +++ b/vendor/github.com/gardener/gardener/third_party/kube-scheduler/v19/v1beta1/types.go @@ -0,0 +1,306 @@ +/* +Copyright 2020 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "bytes" + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + componentbaseconfigv1alpha1 "k8s.io/component-base/config/v1alpha1" + v1 "k8s.io/kube-scheduler/config/v1" + "sigs.k8s.io/yaml" +) + +const ( + // SchedulerDefaultLockObjectNamespace defines default scheduler lock object namespace ("kube-system") + SchedulerDefaultLockObjectNamespace string = metav1.NamespaceSystem + + // SchedulerDefaultLockObjectName defines default scheduler lock object name ("kube-scheduler") + SchedulerDefaultLockObjectName = "kube-scheduler" + + // SchedulerDefaultProviderName defines the default provider names + SchedulerDefaultProviderName = "DefaultProvider" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// KubeSchedulerConfiguration configures a scheduler +type KubeSchedulerConfiguration struct { + metav1.TypeMeta `json:",inline"` + + // LeaderElection defines the configuration of leader election client. + LeaderElection componentbaseconfigv1alpha1.LeaderElectionConfiguration `json:"leaderElection"` + + // ClientConnection specifies the kubeconfig file and client connection + // settings for the proxy server to use when communicating with the apiserver. 
+ ClientConnection componentbaseconfigv1alpha1.ClientConnectionConfiguration `json:"clientConnection"` + // HealthzBindAddress is the IP address and port for the health check server to serve on, + // defaulting to 0.0.0.0:10251 + HealthzBindAddress *string `json:"healthzBindAddress,omitempty"` + // MetricsBindAddress is the IP address and port for the metrics server to + // serve on, defaulting to 0.0.0.0:10251. + MetricsBindAddress *string `json:"metricsBindAddress,omitempty"` + + // DebuggingConfiguration holds configuration for Debugging related features + // TODO: We might wanna make this a substruct like Debugging componentbaseconfigv1alpha1.DebuggingConfiguration + componentbaseconfigv1alpha1.DebuggingConfiguration `json:",inline"` + + // PercentageOfNodesToScore is the percentage of all nodes that once found feasible + // for running a pod, the scheduler stops its search for more feasible nodes in + // the cluster. This helps improve scheduler's performance. Scheduler always tries to find + // at least "minFeasibleNodesToFind" feasible nodes no matter what the value of this flag is. + // Example: if the cluster size is 500 nodes and the value of this flag is 30, + // then scheduler stops finding further feasible nodes once it finds 150 feasible ones. + // When the value is 0, default percentage (5%--50% based on the size of the cluster) of the + // nodes will be scored. + PercentageOfNodesToScore *int32 `json:"percentageOfNodesToScore,omitempty"` + + // PodInitialBackoffSeconds is the initial backoff for unschedulable pods. + // If specified, it must be greater than 0. If this value is null, the default value (1s) + // will be used. + PodInitialBackoffSeconds *int64 `json:"podInitialBackoffSeconds,omitempty"` + + // PodMaxBackoffSeconds is the max backoff for unschedulable pods. + // If specified, it must be greater than podInitialBackoffSeconds. If this value is null, + // the default value (10s) will be used. 
+ PodMaxBackoffSeconds *int64 `json:"podMaxBackoffSeconds,omitempty"` + + // Profiles are scheduling profiles that kube-scheduler supports. Pods can + // choose to be scheduled under a particular profile by setting its associated + // scheduler name. Pods that don't specify any scheduler name are scheduled + // with the "default-scheduler" profile, if present here. + // +listType=map + // +listMapKey=schedulerName + Profiles []KubeSchedulerProfile `json:"profiles,omitempty"` + + // Extenders are the list of scheduler extenders, each holding the values of how to communicate + // with the extender. These extenders are shared by all scheduler profiles. + // +listType=set + Extenders []Extender `json:"extenders,omitempty"` +} + +// DecodeNestedObjects decodes plugin args for known types. +func (c *KubeSchedulerConfiguration) DecodeNestedObjects(d runtime.Decoder) error { + for i := range c.Profiles { + prof := &c.Profiles[i] + for j := range prof.PluginConfig { + err := prof.PluginConfig[j].decodeNestedObjects(d) + if err != nil { + return fmt.Errorf("decoding .profiles[%d].pluginConfig[%d]: %w", i, j, err) + } + } + } + return nil +} + +// EncodeNestedObjects encodes plugin args. +func (c *KubeSchedulerConfiguration) EncodeNestedObjects(e runtime.Encoder) error { + for i := range c.Profiles { + prof := &c.Profiles[i] + for j := range prof.PluginConfig { + err := prof.PluginConfig[j].encodeNestedObjects(e) + if err != nil { + return fmt.Errorf("encoding .profiles[%d].pluginConfig[%d]: %w", i, j, err) + } + } + } + return nil +} + +// KubeSchedulerProfile is a scheduling profile. +type KubeSchedulerProfile struct { + // SchedulerName is the name of the scheduler associated to this profile. + // If SchedulerName matches with the pod's "spec.schedulerName", then the pod + // is scheduled with this profile. + SchedulerName *string `json:"schedulerName,omitempty"` + + // Plugins specify the set of plugins that should be enabled or disabled. 
+ // Enabled plugins are the ones that should be enabled in addition to the + // default plugins. Disabled plugins are any of the default plugins that + // should be disabled. + // When no enabled or disabled plugin is specified for an extension point, + // default plugins for that extension point will be used if there is any. + // If a QueueSort plugin is specified, the same QueueSort Plugin and + // PluginConfig must be specified for all profiles. + Plugins *Plugins `json:"plugins,omitempty"` + + // PluginConfig is an optional set of custom plugin arguments for each plugin. + // Omitting config args for a plugin is equivalent to using the default config + // for that plugin. + // +listType=map + // +listMapKey=name + PluginConfig []PluginConfig `json:"pluginConfig,omitempty"` +} + +// Plugins include multiple extension points. When specified, the list of plugins for +// a particular extension point are the only ones enabled. If an extension point is +// omitted from the config, then the default set of plugins is used for that extension point. +// Enabled plugins are called in the order specified here, after default plugins. If they need to +// be invoked before default plugins, default plugins must be disabled and re-enabled here in desired order. +type Plugins struct { + // QueueSort is a list of plugins that should be invoked when sorting pods in the scheduling queue. + QueueSort *PluginSet `json:"queueSort,omitempty"` + + // PreFilter is a list of plugins that should be invoked at "PreFilter" extension point of the scheduling framework. + PreFilter *PluginSet `json:"preFilter,omitempty"` + + // Filter is a list of plugins that should be invoked when filtering out nodes that cannot run the Pod. + Filter *PluginSet `json:"filter,omitempty"` + + // PostFilter is a list of plugins that are invoked after filtering phase, no matter whether filtering succeeds or not. 
+ PostFilter *PluginSet `json:"postFilter,omitempty"` + + // PreScore is a list of plugins that are invoked before scoring. + PreScore *PluginSet `json:"preScore,omitempty"` + + // Score is a list of plugins that should be invoked when ranking nodes that have passed the filtering phase. + Score *PluginSet `json:"score,omitempty"` + + // Reserve is a list of plugins invoked when reserving/unreserving resources + // after a node is assigned to run the pod. + Reserve *PluginSet `json:"reserve,omitempty"` + + // Permit is a list of plugins that control binding of a Pod. These plugins can prevent or delay binding of a Pod. + Permit *PluginSet `json:"permit,omitempty"` + + // PreBind is a list of plugins that should be invoked before a pod is bound. + PreBind *PluginSet `json:"preBind,omitempty"` + + // Bind is a list of plugins that should be invoked at "Bind" extension point of the scheduling framework. + // The scheduler call these plugins in order. Scheduler skips the rest of these plugins as soon as one returns success. + Bind *PluginSet `json:"bind,omitempty"` + + // PostBind is a list of plugins that should be invoked after a pod is successfully bound. + PostBind *PluginSet `json:"postBind,omitempty"` +} + +// PluginSet specifies enabled and disabled plugins for an extension point. +// If an array is empty, missing, or nil, default plugins at that extension point will be used. +type PluginSet struct { + // Enabled specifies plugins that should be enabled in addition to default plugins. + // These are called after default plugins and in the same order specified here. + // +listType=atomic + Enabled []Plugin `json:"enabled,omitempty"` + // Disabled specifies default plugins that should be disabled. + // When all default plugins need to be disabled, an array containing only one "*" should be provided. + // +listType=map + // +listMapKey=name + Disabled []Plugin `json:"disabled,omitempty"` +} + +// Plugin specifies a plugin name and its weight when applicable. 
Weight is used only for Score plugins. +type Plugin struct { + // Name defines the name of plugin + Name string `json:"name"` + // Weight defines the weight of plugin, only used for Score plugins. + Weight *int32 `json:"weight,omitempty"` +} + +// PluginConfig specifies arguments that should be passed to a plugin at the time of initialization. +// A plugin that is invoked at multiple extension points is initialized once. Args can have arbitrary structure. +// It is up to the plugin to process these Args. +type PluginConfig struct { + // Name defines the name of plugin being configured + Name string `json:"name"` + // Args defines the arguments passed to the plugins at the time of initialization. Args can have arbitrary structure. + Args runtime.RawExtension `json:"args,omitempty"` +} + +func (c *PluginConfig) decodeNestedObjects(d runtime.Decoder) error { + gvk := SchemeGroupVersion.WithKind(c.Name + "Args") + // dry-run to detect and skip out-of-tree plugin args. + if _, _, err := d.Decode(nil, &gvk, nil); runtime.IsNotRegisteredError(err) { + return nil + } + + obj, parsedGvk, err := d.Decode(c.Args.Raw, &gvk, nil) + if err != nil { + return fmt.Errorf("decoding args for plugin %s: %w", c.Name, err) + } + if parsedGvk.GroupKind() != gvk.GroupKind() { + return fmt.Errorf("args for plugin %s were not of type %s, got %s", c.Name, gvk.GroupKind(), parsedGvk.GroupKind()) + } + c.Args.Object = obj + return nil +} + +func (c *PluginConfig) encodeNestedObjects(e runtime.Encoder) error { + if c.Args.Object == nil { + return nil + } + var buf bytes.Buffer + err := e.Encode(c.Args.Object, &buf) + if err != nil { + return err + } + // The encoder might be a YAML encoder, but the parent encoder expects + // JSON output, so we convert YAML back to JSON. + // This is a no-op if produces JSON. 
+ json, err := yaml.YAMLToJSON(buf.Bytes()) + if err != nil { + return err + } + c.Args.Raw = json + return nil +} + +// Extender holds the parameters used to communicate with the extender. If a verb is unspecified/empty, +// it is assumed that the extender chose not to provide that extension. +type Extender struct { + // URLPrefix at which the extender is available + URLPrefix string `json:"urlPrefix"` + // Verb for the filter call, empty if not supported. This verb is appended to the URLPrefix when issuing the filter call to extender. + FilterVerb string `json:"filterVerb,omitempty"` + // Verb for the preempt call, empty if not supported. This verb is appended to the URLPrefix when issuing the preempt call to extender. + PreemptVerb string `json:"preemptVerb,omitempty"` + // Verb for the prioritize call, empty if not supported. This verb is appended to the URLPrefix when issuing the prioritize call to extender. + PrioritizeVerb string `json:"prioritizeVerb,omitempty"` + // The numeric multiplier for the node scores that the prioritize call generates. + // The weight should be a positive integer + Weight int64 `json:"weight,omitempty"` + // Verb for the bind call, empty if not supported. This verb is appended to the URLPrefix when issuing the bind call to extender. + // If this method is implemented by the extender, it is the extender's responsibility to bind the pod to apiserver. Only one extender + // can implement this function. + BindVerb string `json:"bindVerb,omitempty"` + // EnableHTTPS specifies whether https should be used to communicate with the extender + EnableHTTPS bool `json:"enableHTTPS,omitempty"` + // TLSConfig specifies the transport layer security config + TLSConfig *v1.ExtenderTLSConfig `json:"tlsConfig,omitempty"` + // HTTPTimeout specifies the timeout duration for a call to the extender. Filter timeout fails the scheduling of the pod. Prioritize + // timeout is ignored, k8s/other extenders priorities are used to select the node. 
+ HTTPTimeout metav1.Duration `json:"httpTimeout,omitempty"` + // NodeCacheCapable specifies that the extender is capable of caching node information, + // so the scheduler should only send minimal information about the eligible nodes + // assuming that the extender already cached full details of all nodes in the cluster + NodeCacheCapable bool `json:"nodeCacheCapable,omitempty"` + // ManagedResources is a list of extended resources that are managed by + // this extender. + // - A pod will be sent to the extender on the Filter, Prioritize and Bind + // (if the extender is the binder) phases iff the pod requests at least + // one of the extended resources in this list. If empty or unspecified, + // all pods will be sent to this extender. + // - If IgnoredByScheduler is set to true for a resource, kube-scheduler + // will skip checking the resource in predicates. + // +optional + // +listType=atomic + ManagedResources []v1.ExtenderManagedResource `json:"managedResources,omitempty"` + // Ignorable specifies if the extender is ignorable, i.e. scheduling should not + // fail when the extender returns an error or is not reachable. + Ignorable bool `json:"ignorable,omitempty"` +} diff --git a/vendor/github.com/gardener/gardener/third_party/kube-scheduler/v19/v1beta1/types_pluginargs.go b/vendor/github.com/gardener/gardener/third_party/kube-scheduler/v19/v1beta1/types_pluginargs.go new file mode 100644 index 000000000..72088af20 --- /dev/null +++ b/vendor/github.com/gardener/gardener/third_party/kube-scheduler/v19/v1beta1/types_pluginargs.go @@ -0,0 +1,172 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// InterPodAffinityArgs holds arguments used to configure the InterPodAffinity plugin. +type InterPodAffinityArgs struct { + metav1.TypeMeta `json:",inline"` + + // HardPodAffinityWeight is the scoring weight for existing pods with a + // matching hard affinity to the incoming pod. + HardPodAffinityWeight *int32 `json:"hardPodAffinityWeight,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// NodeLabelArgs holds arguments used to configure the NodeLabel plugin. +type NodeLabelArgs struct { + metav1.TypeMeta `json:",inline"` + + // PresentLabels should be present for the node to be considered a fit for hosting the pod + // +listType=atomic + PresentLabels []string `json:"presentLabels,omitempty"` + // AbsentLabels should be absent for the node to be considered a fit for hosting the pod + // +listType=atomic + AbsentLabels []string `json:"absentLabels,omitempty"` + // Nodes that have labels in the list will get a higher score. + // +listType=atomic + PresentLabelsPreference []string `json:"presentLabelsPreference,omitempty"` + // Nodes that don't have labels in the list will get a higher score. 
+ // +listType=atomic + AbsentLabelsPreference []string `json:"absentLabelsPreference,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// NodeResourcesFitArgs holds arguments used to configure the NodeResourcesFit plugin. +type NodeResourcesFitArgs struct { + metav1.TypeMeta `json:",inline"` + + // IgnoredResources is the list of resources that NodeResources fit filter + // should ignore. + // +listType=atomic + IgnoredResources []string `json:"ignoredResources,omitempty"` + // IgnoredResourceGroups defines the list of resource groups that NodeResources fit filter should ignore. + // e.g. if group is ["example.com"], it will ignore all resource names that begin + // with "example.com", such as "example.com/aaa" and "example.com/bbb". + // A resource group name can't contain '/'. + // +listType=atomic + IgnoredResourceGroups []string `json:"ignoredResourceGroups,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodTopologySpreadArgs holds arguments used to configure the PodTopologySpread plugin. +type PodTopologySpreadArgs struct { + metav1.TypeMeta `json:",inline"` + + // DefaultConstraints defines topology spread constraints to be applied to + // pods that don't define any in `pod.spec.topologySpreadConstraints`. + // `topologySpreadConstraint.labelSelectors` must be empty, as they are + // deduced the pods' membership to Services, Replication Controllers, Replica + // Sets or Stateful Sets. + // Empty by default. + // +optional + // +listType=atomic + DefaultConstraints []corev1.TopologySpreadConstraint `json:"defaultConstraints"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RequestedToCapacityRatioArgs holds arguments used to configure RequestedToCapacityRatio plugin. 
+type RequestedToCapacityRatioArgs struct { + metav1.TypeMeta `json:",inline"` + + // Points defining priority function shape + // +listType=atomic + Shape []UtilizationShapePoint `json:"shape"` + // Resources to be managed + // +listType=atomic + Resources []ResourceSpec `json:"resources,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// NodeResourcesLeastAllocatedArgs holds arguments used to configure NodeResourcesLeastAllocated plugin. +type NodeResourcesLeastAllocatedArgs struct { + metav1.TypeMeta `json:",inline"` + + // Resources to be managed, if no resource is provided, default resource set with both + // the weight of "cpu" and "memory" set to "1" will be applied. + // Resource with "0" weight will not accountable for the final score. + // +listType=atomic + Resources []ResourceSpec `json:"resources,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// NodeResourcesMostAllocatedArgs holds arguments used to configure NodeResourcesMostAllocated plugin. +type NodeResourcesMostAllocatedArgs struct { + metav1.TypeMeta `json:",inline"` + + // Resources to be managed, if no resource is provided, default resource set with both + // the weight of "cpu" and "memory" set to "1" will be applied. + // Resource with "0" weight will not accountable for the final score. + // +listType=atomic + Resources []ResourceSpec `json:"resources,omitempty"` +} + +// UtilizationShapePoint represents single point of priority function shape. +type UtilizationShapePoint struct { + // Utilization (x axis). Valid values are 0 to 100. Fully utilized node maps to 100. + Utilization int32 `json:"utilization"` + // Score assigned to given utilization (y axis). Valid values are 0 to 10. + Score int32 `json:"score"` +} + +// ResourceSpec represents single resource and weight for bin packing of priority RequestedToCapacityRatioArguments. 
+type ResourceSpec struct { + // Name of the resource to be managed by RequestedToCapacityRatio function. + Name string `json:"name"` + // Weight of the resource. + Weight int64 `json:"weight,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ServiceAffinityArgs holds arguments used to configure the ServiceAffinity plugin. +type ServiceAffinityArgs struct { + metav1.TypeMeta `json:",inline"` + + // AffinityLabels are homogeneous for pods that are scheduled to a node. + // (i.e. it returns true IFF this pod can be added to this node such that all other pods in + // the same service are running on nodes with the exact same values for Labels). + // +listType=atomic + AffinityLabels []string `json:"affinityLabels,omitempty"` + // AntiAffinityLabelsPreference are the labels to consider for service anti affinity scoring. + // +listType=atomic + AntiAffinityLabelsPreference []string `json:"antiAffinityLabelsPreference,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// VolumeBindingArgs holds arguments used to configure the VolumeBinding plugin. +type VolumeBindingArgs struct { + metav1.TypeMeta `json:",inline"` + + // BindTimeoutSeconds is the timeout in seconds in volume binding operation. + // Value must be non-negative integer. The value zero indicates no waiting. + // If this value is nil, the default value (600) will be used. + BindTimeoutSeconds *int64 `json:"bindTimeoutSeconds,omitempty"` +} diff --git a/vendor/github.com/gardener/gardener/third_party/kube-scheduler/v19/v1beta1/zz_generated.deepcopy.go b/vendor/github.com/gardener/gardener/third_party/kube-scheduler/v19/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 000000000..4a5f5cd7b --- /dev/null +++ b/vendor/github.com/gardener/gardener/third_party/kube-scheduler/v19/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,627 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + corev1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + v1 "k8s.io/kube-scheduler/config/v1" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Extender) DeepCopyInto(out *Extender) { + *out = *in + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(v1.ExtenderTLSConfig) + (*in).DeepCopyInto(*out) + } + out.HTTPTimeout = in.HTTPTimeout + if in.ManagedResources != nil { + in, out := &in.ManagedResources, &out.ManagedResources + *out = make([]v1.ExtenderManagedResource, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Extender. +func (in *Extender) DeepCopy() *Extender { + if in == nil { + return nil + } + out := new(Extender) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InterPodAffinityArgs) DeepCopyInto(out *InterPodAffinityArgs) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.HardPodAffinityWeight != nil { + in, out := &in.HardPodAffinityWeight, &out.HardPodAffinityWeight + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InterPodAffinityArgs. +func (in *InterPodAffinityArgs) DeepCopy() *InterPodAffinityArgs { + if in == nil { + return nil + } + out := new(InterPodAffinityArgs) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *InterPodAffinityArgs) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeSchedulerConfiguration) DeepCopyInto(out *KubeSchedulerConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.LeaderElection.DeepCopyInto(&out.LeaderElection) + out.ClientConnection = in.ClientConnection + if in.HealthzBindAddress != nil { + in, out := &in.HealthzBindAddress, &out.HealthzBindAddress + *out = new(string) + **out = **in + } + if in.MetricsBindAddress != nil { + in, out := &in.MetricsBindAddress, &out.MetricsBindAddress + *out = new(string) + **out = **in + } + in.DebuggingConfiguration.DeepCopyInto(&out.DebuggingConfiguration) + if in.PercentageOfNodesToScore != nil { + in, out := &in.PercentageOfNodesToScore, &out.PercentageOfNodesToScore + *out = new(int32) + **out = **in + } + if in.PodInitialBackoffSeconds != nil { + in, out := &in.PodInitialBackoffSeconds, &out.PodInitialBackoffSeconds + *out = new(int64) + **out = **in + } + if in.PodMaxBackoffSeconds != nil { + in, out := &in.PodMaxBackoffSeconds, &out.PodMaxBackoffSeconds + *out = new(int64) + **out = **in + } + if in.Profiles != nil { + in, out := 
&in.Profiles, &out.Profiles + *out = make([]KubeSchedulerProfile, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Extenders != nil { + in, out := &in.Extenders, &out.Extenders + *out = make([]Extender, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeSchedulerConfiguration. +func (in *KubeSchedulerConfiguration) DeepCopy() *KubeSchedulerConfiguration { + if in == nil { + return nil + } + out := new(KubeSchedulerConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KubeSchedulerConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeSchedulerProfile) DeepCopyInto(out *KubeSchedulerProfile) { + *out = *in + if in.SchedulerName != nil { + in, out := &in.SchedulerName, &out.SchedulerName + *out = new(string) + **out = **in + } + if in.Plugins != nil { + in, out := &in.Plugins, &out.Plugins + *out = new(Plugins) + (*in).DeepCopyInto(*out) + } + if in.PluginConfig != nil { + in, out := &in.PluginConfig, &out.PluginConfig + *out = make([]PluginConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeSchedulerProfile. +func (in *KubeSchedulerProfile) DeepCopy() *KubeSchedulerProfile { + if in == nil { + return nil + } + out := new(KubeSchedulerProfile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NodeLabelArgs) DeepCopyInto(out *NodeLabelArgs) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.PresentLabels != nil { + in, out := &in.PresentLabels, &out.PresentLabels + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.AbsentLabels != nil { + in, out := &in.AbsentLabels, &out.AbsentLabels + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.PresentLabelsPreference != nil { + in, out := &in.PresentLabelsPreference, &out.PresentLabelsPreference + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.AbsentLabelsPreference != nil { + in, out := &in.AbsentLabelsPreference, &out.AbsentLabelsPreference + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeLabelArgs. +func (in *NodeLabelArgs) DeepCopy() *NodeLabelArgs { + if in == nil { + return nil + } + out := new(NodeLabelArgs) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NodeLabelArgs) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeResourcesFitArgs) DeepCopyInto(out *NodeResourcesFitArgs) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.IgnoredResources != nil { + in, out := &in.IgnoredResources, &out.IgnoredResources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.IgnoredResourceGroups != nil { + in, out := &in.IgnoredResourceGroups, &out.IgnoredResourceGroups + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResourcesFitArgs. 
+func (in *NodeResourcesFitArgs) DeepCopy() *NodeResourcesFitArgs { + if in == nil { + return nil + } + out := new(NodeResourcesFitArgs) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NodeResourcesFitArgs) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeResourcesLeastAllocatedArgs) DeepCopyInto(out *NodeResourcesLeastAllocatedArgs) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ResourceSpec, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResourcesLeastAllocatedArgs. +func (in *NodeResourcesLeastAllocatedArgs) DeepCopy() *NodeResourcesLeastAllocatedArgs { + if in == nil { + return nil + } + out := new(NodeResourcesLeastAllocatedArgs) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NodeResourcesLeastAllocatedArgs) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeResourcesMostAllocatedArgs) DeepCopyInto(out *NodeResourcesMostAllocatedArgs) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ResourceSpec, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResourcesMostAllocatedArgs. 
+func (in *NodeResourcesMostAllocatedArgs) DeepCopy() *NodeResourcesMostAllocatedArgs { + if in == nil { + return nil + } + out := new(NodeResourcesMostAllocatedArgs) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NodeResourcesMostAllocatedArgs) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Plugin) DeepCopyInto(out *Plugin) { + *out = *in + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Plugin. +func (in *Plugin) DeepCopy() *Plugin { + if in == nil { + return nil + } + out := new(Plugin) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PluginConfig) DeepCopyInto(out *PluginConfig) { + *out = *in + in.Args.DeepCopyInto(&out.Args) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginConfig. +func (in *PluginConfig) DeepCopy() *PluginConfig { + if in == nil { + return nil + } + out := new(PluginConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PluginSet) DeepCopyInto(out *PluginSet) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = make([]Plugin, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = make([]Plugin, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginSet. +func (in *PluginSet) DeepCopy() *PluginSet { + if in == nil { + return nil + } + out := new(PluginSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Plugins) DeepCopyInto(out *Plugins) { + *out = *in + if in.QueueSort != nil { + in, out := &in.QueueSort, &out.QueueSort + *out = new(PluginSet) + (*in).DeepCopyInto(*out) + } + if in.PreFilter != nil { + in, out := &in.PreFilter, &out.PreFilter + *out = new(PluginSet) + (*in).DeepCopyInto(*out) + } + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(PluginSet) + (*in).DeepCopyInto(*out) + } + if in.PostFilter != nil { + in, out := &in.PostFilter, &out.PostFilter + *out = new(PluginSet) + (*in).DeepCopyInto(*out) + } + if in.PreScore != nil { + in, out := &in.PreScore, &out.PreScore + *out = new(PluginSet) + (*in).DeepCopyInto(*out) + } + if in.Score != nil { + in, out := &in.Score, &out.Score + *out = new(PluginSet) + (*in).DeepCopyInto(*out) + } + if in.Reserve != nil { + in, out := &in.Reserve, &out.Reserve + *out = new(PluginSet) + (*in).DeepCopyInto(*out) + } + if in.Permit != nil { + in, out := &in.Permit, &out.Permit + *out = new(PluginSet) + (*in).DeepCopyInto(*out) + } + if in.PreBind != nil { + in, out := &in.PreBind, &out.PreBind + *out = new(PluginSet) + (*in).DeepCopyInto(*out) + } + if in.Bind != nil { + in, out := &in.Bind, &out.Bind + *out = new(PluginSet) + 
(*in).DeepCopyInto(*out) + } + if in.PostBind != nil { + in, out := &in.PostBind, &out.PostBind + *out = new(PluginSet) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Plugins. +func (in *Plugins) DeepCopy() *Plugins { + if in == nil { + return nil + } + out := new(Plugins) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodTopologySpreadArgs) DeepCopyInto(out *PodTopologySpreadArgs) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.DefaultConstraints != nil { + in, out := &in.DefaultConstraints, &out.DefaultConstraints + *out = make([]corev1.TopologySpreadConstraint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodTopologySpreadArgs. +func (in *PodTopologySpreadArgs) DeepCopy() *PodTopologySpreadArgs { + if in == nil { + return nil + } + out := new(PodTopologySpreadArgs) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodTopologySpreadArgs) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RequestedToCapacityRatioArgs) DeepCopyInto(out *RequestedToCapacityRatioArgs) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Shape != nil { + in, out := &in.Shape, &out.Shape + *out = make([]UtilizationShapePoint, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ResourceSpec, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestedToCapacityRatioArgs. +func (in *RequestedToCapacityRatioArgs) DeepCopy() *RequestedToCapacityRatioArgs { + if in == nil { + return nil + } + out := new(RequestedToCapacityRatioArgs) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RequestedToCapacityRatioArgs) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceSpec) DeepCopyInto(out *ResourceSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSpec. +func (in *ResourceSpec) DeepCopy() *ResourceSpec { + if in == nil { + return nil + } + out := new(ResourceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceAffinityArgs) DeepCopyInto(out *ServiceAffinityArgs) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.AffinityLabels != nil { + in, out := &in.AffinityLabels, &out.AffinityLabels + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.AntiAffinityLabelsPreference != nil { + in, out := &in.AntiAffinityLabelsPreference, &out.AntiAffinityLabelsPreference + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAffinityArgs. +func (in *ServiceAffinityArgs) DeepCopy() *ServiceAffinityArgs { + if in == nil { + return nil + } + out := new(ServiceAffinityArgs) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceAffinityArgs) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UtilizationShapePoint) DeepCopyInto(out *UtilizationShapePoint) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UtilizationShapePoint. +func (in *UtilizationShapePoint) DeepCopy() *UtilizationShapePoint { + if in == nil { + return nil + } + out := new(UtilizationShapePoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeBindingArgs) DeepCopyInto(out *VolumeBindingArgs) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.BindTimeoutSeconds != nil { + in, out := &in.BindTimeoutSeconds, &out.BindTimeoutSeconds + *out = new(int64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeBindingArgs. 
+func (in *VolumeBindingArgs) DeepCopy() *VolumeBindingArgs { + if in == nil { + return nil + } + out := new(VolumeBindingArgs) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VolumeBindingArgs) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/github.com/gardener/gardener/third_party/kube-scheduler/v20/v1beta1/doc.go b/vendor/github.com/gardener/gardener/third_party/kube-scheduler/v20/v1beta1/doc.go new file mode 100644 index 000000000..c3bac316d --- /dev/null +++ b/vendor/github.com/gardener/gardener/third_party/kube-scheduler/v20/v1beta1/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true +// +groupName=kubescheduler.config.k8s.io + +package v1beta1 // import "k8s.io/kube-scheduler/config/v1beta1" diff --git a/vendor/github.com/gardener/gardener/third_party/kube-scheduler/v20/v1beta1/register.go b/vendor/github.com/gardener/gardener/third_party/kube-scheduler/v20/v1beta1/register.go new file mode 100644 index 000000000..98a8f7b22 --- /dev/null +++ b/vendor/github.com/gardener/gardener/third_party/kube-scheduler/v20/v1beta1/register.go @@ -0,0 +1,52 @@ +/* +Copyright 2020 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name used in this package +const GroupName = "kubescheduler.config.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +var ( + // SchemeBuilder is the scheme builder with scheme init functions to run for this API package + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme is a global function that registers this API group & version to a scheme + AddToScheme = SchemeBuilder.AddToScheme +) + +// addKnownTypes registers known types to the given scheme +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &KubeSchedulerConfiguration{}, + &InterPodAffinityArgs{}, + &NodeLabelArgs{}, + &NodeResourcesFitArgs{}, + &PodTopologySpreadArgs{}, + &RequestedToCapacityRatioArgs{}, + &ServiceAffinityArgs{}, + &NodeResourcesLeastAllocatedArgs{}, + &NodeResourcesMostAllocatedArgs{}, + &VolumeBindingArgs{}, + ) + return nil +} diff --git a/vendor/github.com/gardener/gardener/third_party/kube-scheduler/v20/v1beta1/types.go b/vendor/github.com/gardener/gardener/third_party/kube-scheduler/v20/v1beta1/types.go new file mode 100644 index 000000000..87965c734 --- /dev/null +++ 
b/vendor/github.com/gardener/gardener/third_party/kube-scheduler/v20/v1beta1/types.go @@ -0,0 +1,306 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "bytes" + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + componentbaseconfigv1alpha1 "k8s.io/component-base/config/v1alpha1" + v1 "k8s.io/kube-scheduler/config/v1" + "sigs.k8s.io/yaml" +) + +const ( + // SchedulerDefaultLockObjectNamespace defines default scheduler lock object namespace ("kube-system") + SchedulerDefaultLockObjectNamespace string = metav1.NamespaceSystem + + // SchedulerDefaultLockObjectName defines default scheduler lock object name ("kube-scheduler") + SchedulerDefaultLockObjectName = "kube-scheduler" + + // SchedulerDefaultProviderName defines the default provider names + SchedulerDefaultProviderName = "DefaultProvider" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// KubeSchedulerConfiguration configures a scheduler +type KubeSchedulerConfiguration struct { + metav1.TypeMeta `json:",inline"` + + // LeaderElection defines the configuration of leader election client. + LeaderElection componentbaseconfigv1alpha1.LeaderElectionConfiguration `json:"leaderElection"` + + // ClientConnection specifies the kubeconfig file and client connection + // settings for the proxy server to use when communicating with the apiserver. 
+ ClientConnection componentbaseconfigv1alpha1.ClientConnectionConfiguration `json:"clientConnection"` + // HealthzBindAddress is the IP address and port for the health check server to serve on, + // defaulting to 0.0.0.0:10251 + HealthzBindAddress *string `json:"healthzBindAddress,omitempty"` + // MetricsBindAddress is the IP address and port for the metrics server to + // serve on, defaulting to 0.0.0.0:10251. + MetricsBindAddress *string `json:"metricsBindAddress,omitempty"` + + // DebuggingConfiguration holds configuration for Debugging related features + // TODO: We might wanna make this a substruct like Debugging componentbaseconfigv1alpha1.DebuggingConfiguration + componentbaseconfigv1alpha1.DebuggingConfiguration `json:",inline"` + + // PercentageOfNodesToScore is the percentage of all nodes that once found feasible + // for running a pod, the scheduler stops its search for more feasible nodes in + // the cluster. This helps improve scheduler's performance. Scheduler always tries to find + // at least "minFeasibleNodesToFind" feasible nodes no matter what the value of this flag is. + // Example: if the cluster size is 500 nodes and the value of this flag is 30, + // then scheduler stops finding further feasible nodes once it finds 150 feasible ones. + // When the value is 0, default percentage (5%--50% based on the size of the cluster) of the + // nodes will be scored. + PercentageOfNodesToScore *int32 `json:"percentageOfNodesToScore,omitempty"` + + // PodInitialBackoffSeconds is the initial backoff for unschedulable pods. + // If specified, it must be greater than 0. If this value is null, the default value (1s) + // will be used. + PodInitialBackoffSeconds *int64 `json:"podInitialBackoffSeconds,omitempty"` + + // PodMaxBackoffSeconds is the max backoff for unschedulable pods. + // If specified, it must be greater than podInitialBackoffSeconds. If this value is null, + // the default value (10s) will be used. 
+ PodMaxBackoffSeconds *int64 `json:"podMaxBackoffSeconds,omitempty"` + + // Profiles are scheduling profiles that kube-scheduler supports. Pods can + // choose to be scheduled under a particular profile by setting its associated + // scheduler name. Pods that don't specify any scheduler name are scheduled + // with the "default-scheduler" profile, if present here. + // +listType=map + // +listMapKey=schedulerName + Profiles []KubeSchedulerProfile `json:"profiles,omitempty"` + + // Extenders are the list of scheduler extenders, each holding the values of how to communicate + // with the extender. These extenders are shared by all scheduler profiles. + // +listType=set + Extenders []Extender `json:"extenders,omitempty"` +} + +// DecodeNestedObjects decodes plugin args for known types. +func (c *KubeSchedulerConfiguration) DecodeNestedObjects(d runtime.Decoder) error { + for i := range c.Profiles { + prof := &c.Profiles[i] + for j := range prof.PluginConfig { + err := prof.PluginConfig[j].decodeNestedObjects(d) + if err != nil { + return fmt.Errorf("decoding .profiles[%d].pluginConfig[%d]: %w", i, j, err) + } + } + } + return nil +} + +// EncodeNestedObjects encodes plugin args. +func (c *KubeSchedulerConfiguration) EncodeNestedObjects(e runtime.Encoder) error { + for i := range c.Profiles { + prof := &c.Profiles[i] + for j := range prof.PluginConfig { + err := prof.PluginConfig[j].encodeNestedObjects(e) + if err != nil { + return fmt.Errorf("encoding .profiles[%d].pluginConfig[%d]: %w", i, j, err) + } + } + } + return nil +} + +// KubeSchedulerProfile is a scheduling profile. +type KubeSchedulerProfile struct { + // SchedulerName is the name of the scheduler associated to this profile. + // If SchedulerName matches with the pod's "spec.schedulerName", then the pod + // is scheduled with this profile. + SchedulerName *string `json:"schedulerName,omitempty"` + + // Plugins specify the set of plugins that should be enabled or disabled. 
+ // Enabled plugins are the ones that should be enabled in addition to the + // default plugins. Disabled plugins are any of the default plugins that + // should be disabled. + // When no enabled or disabled plugin is specified for an extension point, + // default plugins for that extension point will be used if there is any. + // If a QueueSort plugin is specified, the same QueueSort Plugin and + // PluginConfig must be specified for all profiles. + Plugins *Plugins `json:"plugins,omitempty"` + + // PluginConfig is an optional set of custom plugin arguments for each plugin. + // Omitting config args for a plugin is equivalent to using the default config + // for that plugin. + // +listType=map + // +listMapKey=name + PluginConfig []PluginConfig `json:"pluginConfig,omitempty"` +} + +// Plugins include multiple extension points. When specified, the list of plugins for +// a particular extension point are the only ones enabled. If an extension point is +// omitted from the config, then the default set of plugins is used for that extension point. +// Enabled plugins are called in the order specified here, after default plugins. If they need to +// be invoked before default plugins, default plugins must be disabled and re-enabled here in desired order. +type Plugins struct { + // QueueSort is a list of plugins that should be invoked when sorting pods in the scheduling queue. + QueueSort *PluginSet `json:"queueSort,omitempty"` + + // PreFilter is a list of plugins that should be invoked at "PreFilter" extension point of the scheduling framework. + PreFilter *PluginSet `json:"preFilter,omitempty"` + + // Filter is a list of plugins that should be invoked when filtering out nodes that cannot run the Pod. + Filter *PluginSet `json:"filter,omitempty"` + + // PostFilter is a list of plugins that are invoked after filtering phase, no matter whether filtering succeeds or not. 
+ PostFilter *PluginSet `json:"postFilter,omitempty"` + + // PreScore is a list of plugins that are invoked before scoring. + PreScore *PluginSet `json:"preScore,omitempty"` + + // Score is a list of plugins that should be invoked when ranking nodes that have passed the filtering phase. + Score *PluginSet `json:"score,omitempty"` + + // Reserve is a list of plugins invoked when reserving/unreserving resources + // after a node is assigned to run the pod. + Reserve *PluginSet `json:"reserve,omitempty"` + + // Permit is a list of plugins that control binding of a Pod. These plugins can prevent or delay binding of a Pod. + Permit *PluginSet `json:"permit,omitempty"` + + // PreBind is a list of plugins that should be invoked before a pod is bound. + PreBind *PluginSet `json:"preBind,omitempty"` + + // Bind is a list of plugins that should be invoked at "Bind" extension point of the scheduling framework. + // The scheduler call these plugins in order. Scheduler skips the rest of these plugins as soon as one returns success. + Bind *PluginSet `json:"bind,omitempty"` + + // PostBind is a list of plugins that should be invoked after a pod is successfully bound. + PostBind *PluginSet `json:"postBind,omitempty"` +} + +// PluginSet specifies enabled and disabled plugins for an extension point. +// If an array is empty, missing, or nil, default plugins at that extension point will be used. +type PluginSet struct { + // Enabled specifies plugins that should be enabled in addition to default plugins. + // These are called after default plugins and in the same order specified here. + // +listType=atomic + Enabled []Plugin `json:"enabled,omitempty"` + // Disabled specifies default plugins that should be disabled. + // When all default plugins need to be disabled, an array containing only one "*" should be provided. + // +listType=map + // +listMapKey=name + Disabled []Plugin `json:"disabled,omitempty"` +} + +// Plugin specifies a plugin name and its weight when applicable. 
Weight is used only for Score plugins. +type Plugin struct { + // Name defines the name of plugin + Name string `json:"name"` + // Weight defines the weight of plugin, only used for Score plugins. + Weight *int32 `json:"weight,omitempty"` +} + +// PluginConfig specifies arguments that should be passed to a plugin at the time of initialization. +// A plugin that is invoked at multiple extension points is initialized once. Args can have arbitrary structure. +// It is up to the plugin to process these Args. +type PluginConfig struct { + // Name defines the name of plugin being configured + Name string `json:"name"` + // Args defines the arguments passed to the plugins at the time of initialization. Args can have arbitrary structure. + Args runtime.RawExtension `json:"args,omitempty"` +} + +func (c *PluginConfig) decodeNestedObjects(d runtime.Decoder) error { + gvk := SchemeGroupVersion.WithKind(c.Name + "Args") + // dry-run to detect and skip out-of-tree plugin args. + if _, _, err := d.Decode(nil, &gvk, nil); runtime.IsNotRegisteredError(err) { + return nil + } + + obj, parsedGvk, err := d.Decode(c.Args.Raw, &gvk, nil) + if err != nil { + return fmt.Errorf("decoding args for plugin %s: %w", c.Name, err) + } + if parsedGvk.GroupKind() != gvk.GroupKind() { + return fmt.Errorf("args for plugin %s were not of type %s, got %s", c.Name, gvk.GroupKind(), parsedGvk.GroupKind()) + } + c.Args.Object = obj + return nil +} + +func (c *PluginConfig) encodeNestedObjects(e runtime.Encoder) error { + if c.Args.Object == nil { + return nil + } + var buf bytes.Buffer + err := e.Encode(c.Args.Object, &buf) + if err != nil { + return err + } + // The encoder might be a YAML encoder, but the parent encoder expects + // JSON output, so we convert YAML back to JSON. + // This is a no-op if produces JSON. 
+ json, err := yaml.YAMLToJSON(buf.Bytes()) + if err != nil { + return err + } + c.Args.Raw = json + return nil +} + +// Extender holds the parameters used to communicate with the extender. If a verb is unspecified/empty, +// it is assumed that the extender chose not to provide that extension. +type Extender struct { + // URLPrefix at which the extender is available + URLPrefix string `json:"urlPrefix"` + // Verb for the filter call, empty if not supported. This verb is appended to the URLPrefix when issuing the filter call to extender. + FilterVerb string `json:"filterVerb,omitempty"` + // Verb for the preempt call, empty if not supported. This verb is appended to the URLPrefix when issuing the preempt call to extender. + PreemptVerb string `json:"preemptVerb,omitempty"` + // Verb for the prioritize call, empty if not supported. This verb is appended to the URLPrefix when issuing the prioritize call to extender. + PrioritizeVerb string `json:"prioritizeVerb,omitempty"` + // The numeric multiplier for the node scores that the prioritize call generates. + // The weight should be a positive integer + Weight int64 `json:"weight,omitempty"` + // Verb for the bind call, empty if not supported. This verb is appended to the URLPrefix when issuing the bind call to extender. + // If this method is implemented by the extender, it is the extender's responsibility to bind the pod to apiserver. Only one extender + // can implement this function. + BindVerb string `json:"bindVerb,omitempty"` + // EnableHTTPS specifies whether https should be used to communicate with the extender + EnableHTTPS bool `json:"enableHTTPS,omitempty"` + // TLSConfig specifies the transport layer security config + TLSConfig *v1.ExtenderTLSConfig `json:"tlsConfig,omitempty"` + // HTTPTimeout specifies the timeout duration for a call to the extender. Filter timeout fails the scheduling of the pod. Prioritize + // timeout is ignored, k8s/other extenders priorities are used to select the node. 
+ HTTPTimeout metav1.Duration `json:"httpTimeout,omitempty"` + // NodeCacheCapable specifies that the extender is capable of caching node information, + // so the scheduler should only send minimal information about the eligible nodes + // assuming that the extender already cached full details of all nodes in the cluster + NodeCacheCapable bool `json:"nodeCacheCapable,omitempty"` + // ManagedResources is a list of extended resources that are managed by + // this extender. + // - A pod will be sent to the extender on the Filter, Prioritize and Bind + // (if the extender is the binder) phases iff the pod requests at least + // one of the extended resources in this list. If empty or unspecified, + // all pods will be sent to this extender. + // - If IgnoredByScheduler is set to true for a resource, kube-scheduler + // will skip checking the resource in predicates. + // +optional + // +listType=atomic + ManagedResources []v1.ExtenderManagedResource `json:"managedResources,omitempty"` + // Ignorable specifies if the extender is ignorable, i.e. scheduling should not + // fail when the extender returns an error or is not reachable. + Ignorable bool `json:"ignorable,omitempty"` +} diff --git a/vendor/github.com/gardener/gardener/third_party/kube-scheduler/v20/v1beta1/types_pluginargs.go b/vendor/github.com/gardener/gardener/third_party/kube-scheduler/v20/v1beta1/types_pluginargs.go new file mode 100644 index 000000000..72088af20 --- /dev/null +++ b/vendor/github.com/gardener/gardener/third_party/kube-scheduler/v20/v1beta1/types_pluginargs.go @@ -0,0 +1,172 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// InterPodAffinityArgs holds arguments used to configure the InterPodAffinity plugin. +type InterPodAffinityArgs struct { + metav1.TypeMeta `json:",inline"` + + // HardPodAffinityWeight is the scoring weight for existing pods with a + // matching hard affinity to the incoming pod. + HardPodAffinityWeight *int32 `json:"hardPodAffinityWeight,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// NodeLabelArgs holds arguments used to configure the NodeLabel plugin. +type NodeLabelArgs struct { + metav1.TypeMeta `json:",inline"` + + // PresentLabels should be present for the node to be considered a fit for hosting the pod + // +listType=atomic + PresentLabels []string `json:"presentLabels,omitempty"` + // AbsentLabels should be absent for the node to be considered a fit for hosting the pod + // +listType=atomic + AbsentLabels []string `json:"absentLabels,omitempty"` + // Nodes that have labels in the list will get a higher score. + // +listType=atomic + PresentLabelsPreference []string `json:"presentLabelsPreference,omitempty"` + // Nodes that don't have labels in the list will get a higher score. 
+ // +listType=atomic + AbsentLabelsPreference []string `json:"absentLabelsPreference,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// NodeResourcesFitArgs holds arguments used to configure the NodeResourcesFit plugin. +type NodeResourcesFitArgs struct { + metav1.TypeMeta `json:",inline"` + + // IgnoredResources is the list of resources that NodeResources fit filter + // should ignore. + // +listType=atomic + IgnoredResources []string `json:"ignoredResources,omitempty"` + // IgnoredResourceGroups defines the list of resource groups that NodeResources fit filter should ignore. + // e.g. if group is ["example.com"], it will ignore all resource names that begin + // with "example.com", such as "example.com/aaa" and "example.com/bbb". + // A resource group name can't contain '/'. + // +listType=atomic + IgnoredResourceGroups []string `json:"ignoredResourceGroups,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodTopologySpreadArgs holds arguments used to configure the PodTopologySpread plugin. +type PodTopologySpreadArgs struct { + metav1.TypeMeta `json:",inline"` + + // DefaultConstraints defines topology spread constraints to be applied to + // pods that don't define any in `pod.spec.topologySpreadConstraints`. + // `topologySpreadConstraint.labelSelectors` must be empty, as they are + // deduced the pods' membership to Services, Replication Controllers, Replica + // Sets or Stateful Sets. + // Empty by default. + // +optional + // +listType=atomic + DefaultConstraints []corev1.TopologySpreadConstraint `json:"defaultConstraints"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RequestedToCapacityRatioArgs holds arguments used to configure RequestedToCapacityRatio plugin. 
+type RequestedToCapacityRatioArgs struct { + metav1.TypeMeta `json:",inline"` + + // Points defining priority function shape + // +listType=atomic + Shape []UtilizationShapePoint `json:"shape"` + // Resources to be managed + // +listType=atomic + Resources []ResourceSpec `json:"resources,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// NodeResourcesLeastAllocatedArgs holds arguments used to configure NodeResourcesLeastAllocated plugin. +type NodeResourcesLeastAllocatedArgs struct { + metav1.TypeMeta `json:",inline"` + + // Resources to be managed, if no resource is provided, default resource set with both + // the weight of "cpu" and "memory" set to "1" will be applied. + // Resource with "0" weight will not accountable for the final score. + // +listType=atomic + Resources []ResourceSpec `json:"resources,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// NodeResourcesMostAllocatedArgs holds arguments used to configure NodeResourcesMostAllocated plugin. +type NodeResourcesMostAllocatedArgs struct { + metav1.TypeMeta `json:",inline"` + + // Resources to be managed, if no resource is provided, default resource set with both + // the weight of "cpu" and "memory" set to "1" will be applied. + // Resource with "0" weight will not accountable for the final score. + // +listType=atomic + Resources []ResourceSpec `json:"resources,omitempty"` +} + +// UtilizationShapePoint represents single point of priority function shape. +type UtilizationShapePoint struct { + // Utilization (x axis). Valid values are 0 to 100. Fully utilized node maps to 100. + Utilization int32 `json:"utilization"` + // Score assigned to given utilization (y axis). Valid values are 0 to 10. + Score int32 `json:"score"` +} + +// ResourceSpec represents single resource and weight for bin packing of priority RequestedToCapacityRatioArguments. 
+type ResourceSpec struct { + // Name of the resource to be managed by RequestedToCapacityRatio function. + Name string `json:"name"` + // Weight of the resource. + Weight int64 `json:"weight,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ServiceAffinityArgs holds arguments used to configure the ServiceAffinity plugin. +type ServiceAffinityArgs struct { + metav1.TypeMeta `json:",inline"` + + // AffinityLabels are homogeneous for pods that are scheduled to a node. + // (i.e. it returns true IFF this pod can be added to this node such that all other pods in + // the same service are running on nodes with the exact same values for Labels). + // +listType=atomic + AffinityLabels []string `json:"affinityLabels,omitempty"` + // AntiAffinityLabelsPreference are the labels to consider for service anti affinity scoring. + // +listType=atomic + AntiAffinityLabelsPreference []string `json:"antiAffinityLabelsPreference,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// VolumeBindingArgs holds arguments used to configure the VolumeBinding plugin. +type VolumeBindingArgs struct { + metav1.TypeMeta `json:",inline"` + + // BindTimeoutSeconds is the timeout in seconds in volume binding operation. + // Value must be non-negative integer. The value zero indicates no waiting. + // If this value is nil, the default value (600) will be used. + BindTimeoutSeconds *int64 `json:"bindTimeoutSeconds,omitempty"` +} diff --git a/vendor/github.com/gardener/gardener/third_party/kube-scheduler/v20/v1beta1/zz_generated.deepcopy.go b/vendor/github.com/gardener/gardener/third_party/kube-scheduler/v20/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 000000000..4a5f5cd7b --- /dev/null +++ b/vendor/github.com/gardener/gardener/third_party/kube-scheduler/v20/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,627 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + corev1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + v1 "k8s.io/kube-scheduler/config/v1" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Extender) DeepCopyInto(out *Extender) { + *out = *in + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(v1.ExtenderTLSConfig) + (*in).DeepCopyInto(*out) + } + out.HTTPTimeout = in.HTTPTimeout + if in.ManagedResources != nil { + in, out := &in.ManagedResources, &out.ManagedResources + *out = make([]v1.ExtenderManagedResource, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Extender. +func (in *Extender) DeepCopy() *Extender { + if in == nil { + return nil + } + out := new(Extender) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InterPodAffinityArgs) DeepCopyInto(out *InterPodAffinityArgs) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.HardPodAffinityWeight != nil { + in, out := &in.HardPodAffinityWeight, &out.HardPodAffinityWeight + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InterPodAffinityArgs. +func (in *InterPodAffinityArgs) DeepCopy() *InterPodAffinityArgs { + if in == nil { + return nil + } + out := new(InterPodAffinityArgs) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *InterPodAffinityArgs) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeSchedulerConfiguration) DeepCopyInto(out *KubeSchedulerConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.LeaderElection.DeepCopyInto(&out.LeaderElection) + out.ClientConnection = in.ClientConnection + if in.HealthzBindAddress != nil { + in, out := &in.HealthzBindAddress, &out.HealthzBindAddress + *out = new(string) + **out = **in + } + if in.MetricsBindAddress != nil { + in, out := &in.MetricsBindAddress, &out.MetricsBindAddress + *out = new(string) + **out = **in + } + in.DebuggingConfiguration.DeepCopyInto(&out.DebuggingConfiguration) + if in.PercentageOfNodesToScore != nil { + in, out := &in.PercentageOfNodesToScore, &out.PercentageOfNodesToScore + *out = new(int32) + **out = **in + } + if in.PodInitialBackoffSeconds != nil { + in, out := &in.PodInitialBackoffSeconds, &out.PodInitialBackoffSeconds + *out = new(int64) + **out = **in + } + if in.PodMaxBackoffSeconds != nil { + in, out := &in.PodMaxBackoffSeconds, &out.PodMaxBackoffSeconds + *out = new(int64) + **out = **in + } + if in.Profiles != nil { + in, out := 
&in.Profiles, &out.Profiles + *out = make([]KubeSchedulerProfile, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Extenders != nil { + in, out := &in.Extenders, &out.Extenders + *out = make([]Extender, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeSchedulerConfiguration. +func (in *KubeSchedulerConfiguration) DeepCopy() *KubeSchedulerConfiguration { + if in == nil { + return nil + } + out := new(KubeSchedulerConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KubeSchedulerConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeSchedulerProfile) DeepCopyInto(out *KubeSchedulerProfile) { + *out = *in + if in.SchedulerName != nil { + in, out := &in.SchedulerName, &out.SchedulerName + *out = new(string) + **out = **in + } + if in.Plugins != nil { + in, out := &in.Plugins, &out.Plugins + *out = new(Plugins) + (*in).DeepCopyInto(*out) + } + if in.PluginConfig != nil { + in, out := &in.PluginConfig, &out.PluginConfig + *out = make([]PluginConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeSchedulerProfile. +func (in *KubeSchedulerProfile) DeepCopy() *KubeSchedulerProfile { + if in == nil { + return nil + } + out := new(KubeSchedulerProfile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NodeLabelArgs) DeepCopyInto(out *NodeLabelArgs) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.PresentLabels != nil { + in, out := &in.PresentLabels, &out.PresentLabels + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.AbsentLabels != nil { + in, out := &in.AbsentLabels, &out.AbsentLabels + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.PresentLabelsPreference != nil { + in, out := &in.PresentLabelsPreference, &out.PresentLabelsPreference + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.AbsentLabelsPreference != nil { + in, out := &in.AbsentLabelsPreference, &out.AbsentLabelsPreference + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeLabelArgs. +func (in *NodeLabelArgs) DeepCopy() *NodeLabelArgs { + if in == nil { + return nil + } + out := new(NodeLabelArgs) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NodeLabelArgs) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeResourcesFitArgs) DeepCopyInto(out *NodeResourcesFitArgs) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.IgnoredResources != nil { + in, out := &in.IgnoredResources, &out.IgnoredResources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.IgnoredResourceGroups != nil { + in, out := &in.IgnoredResourceGroups, &out.IgnoredResourceGroups + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResourcesFitArgs. 
+func (in *NodeResourcesFitArgs) DeepCopy() *NodeResourcesFitArgs { + if in == nil { + return nil + } + out := new(NodeResourcesFitArgs) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NodeResourcesFitArgs) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeResourcesLeastAllocatedArgs) DeepCopyInto(out *NodeResourcesLeastAllocatedArgs) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ResourceSpec, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResourcesLeastAllocatedArgs. +func (in *NodeResourcesLeastAllocatedArgs) DeepCopy() *NodeResourcesLeastAllocatedArgs { + if in == nil { + return nil + } + out := new(NodeResourcesLeastAllocatedArgs) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NodeResourcesLeastAllocatedArgs) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeResourcesMostAllocatedArgs) DeepCopyInto(out *NodeResourcesMostAllocatedArgs) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ResourceSpec, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResourcesMostAllocatedArgs. 
+func (in *NodeResourcesMostAllocatedArgs) DeepCopy() *NodeResourcesMostAllocatedArgs { + if in == nil { + return nil + } + out := new(NodeResourcesMostAllocatedArgs) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NodeResourcesMostAllocatedArgs) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Plugin) DeepCopyInto(out *Plugin) { + *out = *in + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Plugin. +func (in *Plugin) DeepCopy() *Plugin { + if in == nil { + return nil + } + out := new(Plugin) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PluginConfig) DeepCopyInto(out *PluginConfig) { + *out = *in + in.Args.DeepCopyInto(&out.Args) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginConfig. +func (in *PluginConfig) DeepCopy() *PluginConfig { + if in == nil { + return nil + } + out := new(PluginConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PluginSet) DeepCopyInto(out *PluginSet) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = make([]Plugin, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = make([]Plugin, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginSet. +func (in *PluginSet) DeepCopy() *PluginSet { + if in == nil { + return nil + } + out := new(PluginSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Plugins) DeepCopyInto(out *Plugins) { + *out = *in + if in.QueueSort != nil { + in, out := &in.QueueSort, &out.QueueSort + *out = new(PluginSet) + (*in).DeepCopyInto(*out) + } + if in.PreFilter != nil { + in, out := &in.PreFilter, &out.PreFilter + *out = new(PluginSet) + (*in).DeepCopyInto(*out) + } + if in.Filter != nil { + in, out := &in.Filter, &out.Filter + *out = new(PluginSet) + (*in).DeepCopyInto(*out) + } + if in.PostFilter != nil { + in, out := &in.PostFilter, &out.PostFilter + *out = new(PluginSet) + (*in).DeepCopyInto(*out) + } + if in.PreScore != nil { + in, out := &in.PreScore, &out.PreScore + *out = new(PluginSet) + (*in).DeepCopyInto(*out) + } + if in.Score != nil { + in, out := &in.Score, &out.Score + *out = new(PluginSet) + (*in).DeepCopyInto(*out) + } + if in.Reserve != nil { + in, out := &in.Reserve, &out.Reserve + *out = new(PluginSet) + (*in).DeepCopyInto(*out) + } + if in.Permit != nil { + in, out := &in.Permit, &out.Permit + *out = new(PluginSet) + (*in).DeepCopyInto(*out) + } + if in.PreBind != nil { + in, out := &in.PreBind, &out.PreBind + *out = new(PluginSet) + (*in).DeepCopyInto(*out) + } + if in.Bind != nil { + in, out := &in.Bind, &out.Bind + *out = new(PluginSet) + 
(*in).DeepCopyInto(*out) + } + if in.PostBind != nil { + in, out := &in.PostBind, &out.PostBind + *out = new(PluginSet) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Plugins. +func (in *Plugins) DeepCopy() *Plugins { + if in == nil { + return nil + } + out := new(Plugins) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodTopologySpreadArgs) DeepCopyInto(out *PodTopologySpreadArgs) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.DefaultConstraints != nil { + in, out := &in.DefaultConstraints, &out.DefaultConstraints + *out = make([]corev1.TopologySpreadConstraint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodTopologySpreadArgs. +func (in *PodTopologySpreadArgs) DeepCopy() *PodTopologySpreadArgs { + if in == nil { + return nil + } + out := new(PodTopologySpreadArgs) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodTopologySpreadArgs) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RequestedToCapacityRatioArgs) DeepCopyInto(out *RequestedToCapacityRatioArgs) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Shape != nil { + in, out := &in.Shape, &out.Shape + *out = make([]UtilizationShapePoint, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ResourceSpec, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestedToCapacityRatioArgs. +func (in *RequestedToCapacityRatioArgs) DeepCopy() *RequestedToCapacityRatioArgs { + if in == nil { + return nil + } + out := new(RequestedToCapacityRatioArgs) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RequestedToCapacityRatioArgs) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceSpec) DeepCopyInto(out *ResourceSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSpec. +func (in *ResourceSpec) DeepCopy() *ResourceSpec { + if in == nil { + return nil + } + out := new(ResourceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceAffinityArgs) DeepCopyInto(out *ServiceAffinityArgs) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.AffinityLabels != nil { + in, out := &in.AffinityLabels, &out.AffinityLabels + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.AntiAffinityLabelsPreference != nil { + in, out := &in.AntiAffinityLabelsPreference, &out.AntiAffinityLabelsPreference + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAffinityArgs. +func (in *ServiceAffinityArgs) DeepCopy() *ServiceAffinityArgs { + if in == nil { + return nil + } + out := new(ServiceAffinityArgs) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceAffinityArgs) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UtilizationShapePoint) DeepCopyInto(out *UtilizationShapePoint) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UtilizationShapePoint. +func (in *UtilizationShapePoint) DeepCopy() *UtilizationShapePoint { + if in == nil { + return nil + } + out := new(UtilizationShapePoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeBindingArgs) DeepCopyInto(out *VolumeBindingArgs) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.BindTimeoutSeconds != nil { + in, out := &in.BindTimeoutSeconds, &out.BindTimeoutSeconds + *out = new(int64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeBindingArgs. 
+func (in *VolumeBindingArgs) DeepCopy() *VolumeBindingArgs { + if in == nil { + return nil + } + out := new(VolumeBindingArgs) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VolumeBindingArgs) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/golang.org/x/crypto/bcrypt/base64.go b/vendor/golang.org/x/crypto/bcrypt/base64.go deleted file mode 100644 index fc3116090..000000000 --- a/vendor/golang.org/x/crypto/bcrypt/base64.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package bcrypt - -import "encoding/base64" - -const alphabet = "./ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789" - -var bcEncoding = base64.NewEncoding(alphabet) - -func base64Encode(src []byte) []byte { - n := bcEncoding.EncodedLen(len(src)) - dst := make([]byte, n) - bcEncoding.Encode(dst, src) - for dst[n-1] == '=' { - n-- - } - return dst[:n] -} - -func base64Decode(src []byte) ([]byte, error) { - numOfEquals := 4 - (len(src) % 4) - for i := 0; i < numOfEquals; i++ { - src = append(src, '=') - } - - dst := make([]byte, bcEncoding.DecodedLen(len(src))) - n, err := bcEncoding.Decode(dst, src) - if err != nil { - return nil, err - } - return dst[:n], nil -} diff --git a/vendor/golang.org/x/crypto/bcrypt/bcrypt.go b/vendor/golang.org/x/crypto/bcrypt/bcrypt.go deleted file mode 100644 index aeb73f81a..000000000 --- a/vendor/golang.org/x/crypto/bcrypt/bcrypt.go +++ /dev/null @@ -1,295 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package bcrypt implements Provos and Mazières's bcrypt adaptive hashing -// algorithm. 
See http://www.usenix.org/event/usenix99/provos/provos.pdf -package bcrypt // import "golang.org/x/crypto/bcrypt" - -// The code is a port of Provos and Mazières's C implementation. -import ( - "crypto/rand" - "crypto/subtle" - "errors" - "fmt" - "io" - "strconv" - - "golang.org/x/crypto/blowfish" -) - -const ( - MinCost int = 4 // the minimum allowable cost as passed in to GenerateFromPassword - MaxCost int = 31 // the maximum allowable cost as passed in to GenerateFromPassword - DefaultCost int = 10 // the cost that will actually be set if a cost below MinCost is passed into GenerateFromPassword -) - -// The error returned from CompareHashAndPassword when a password and hash do -// not match. -var ErrMismatchedHashAndPassword = errors.New("crypto/bcrypt: hashedPassword is not the hash of the given password") - -// The error returned from CompareHashAndPassword when a hash is too short to -// be a bcrypt hash. -var ErrHashTooShort = errors.New("crypto/bcrypt: hashedSecret too short to be a bcrypted password") - -// The error returned from CompareHashAndPassword when a hash was created with -// a bcrypt algorithm newer than this implementation. 
-type HashVersionTooNewError byte - -func (hv HashVersionTooNewError) Error() string { - return fmt.Sprintf("crypto/bcrypt: bcrypt algorithm version '%c' requested is newer than current version '%c'", byte(hv), majorVersion) -} - -// The error returned from CompareHashAndPassword when a hash starts with something other than '$' -type InvalidHashPrefixError byte - -func (ih InvalidHashPrefixError) Error() string { - return fmt.Sprintf("crypto/bcrypt: bcrypt hashes must start with '$', but hashedSecret started with '%c'", byte(ih)) -} - -type InvalidCostError int - -func (ic InvalidCostError) Error() string { - return fmt.Sprintf("crypto/bcrypt: cost %d is outside allowed range (%d,%d)", int(ic), int(MinCost), int(MaxCost)) -} - -const ( - majorVersion = '2' - minorVersion = 'a' - maxSaltSize = 16 - maxCryptedHashSize = 23 - encodedSaltSize = 22 - encodedHashSize = 31 - minHashSize = 59 -) - -// magicCipherData is an IV for the 64 Blowfish encryption calls in -// bcrypt(). It's the string "OrpheanBeholderScryDoubt" in big-endian bytes. -var magicCipherData = []byte{ - 0x4f, 0x72, 0x70, 0x68, - 0x65, 0x61, 0x6e, 0x42, - 0x65, 0x68, 0x6f, 0x6c, - 0x64, 0x65, 0x72, 0x53, - 0x63, 0x72, 0x79, 0x44, - 0x6f, 0x75, 0x62, 0x74, -} - -type hashed struct { - hash []byte - salt []byte - cost int // allowed range is MinCost to MaxCost - major byte - minor byte -} - -// GenerateFromPassword returns the bcrypt hash of the password at the given -// cost. If the cost given is less than MinCost, the cost will be set to -// DefaultCost, instead. Use CompareHashAndPassword, as defined in this package, -// to compare the returned hashed password with its cleartext version. -func GenerateFromPassword(password []byte, cost int) ([]byte, error) { - p, err := newFromPassword(password, cost) - if err != nil { - return nil, err - } - return p.Hash(), nil -} - -// CompareHashAndPassword compares a bcrypt hashed password with its possible -// plaintext equivalent. 
Returns nil on success, or an error on failure. -func CompareHashAndPassword(hashedPassword, password []byte) error { - p, err := newFromHash(hashedPassword) - if err != nil { - return err - } - - otherHash, err := bcrypt(password, p.cost, p.salt) - if err != nil { - return err - } - - otherP := &hashed{otherHash, p.salt, p.cost, p.major, p.minor} - if subtle.ConstantTimeCompare(p.Hash(), otherP.Hash()) == 1 { - return nil - } - - return ErrMismatchedHashAndPassword -} - -// Cost returns the hashing cost used to create the given hashed -// password. When, in the future, the hashing cost of a password system needs -// to be increased in order to adjust for greater computational power, this -// function allows one to establish which passwords need to be updated. -func Cost(hashedPassword []byte) (int, error) { - p, err := newFromHash(hashedPassword) - if err != nil { - return 0, err - } - return p.cost, nil -} - -func newFromPassword(password []byte, cost int) (*hashed, error) { - if cost < MinCost { - cost = DefaultCost - } - p := new(hashed) - p.major = majorVersion - p.minor = minorVersion - - err := checkCost(cost) - if err != nil { - return nil, err - } - p.cost = cost - - unencodedSalt := make([]byte, maxSaltSize) - _, err = io.ReadFull(rand.Reader, unencodedSalt) - if err != nil { - return nil, err - } - - p.salt = base64Encode(unencodedSalt) - hash, err := bcrypt(password, p.cost, p.salt) - if err != nil { - return nil, err - } - p.hash = hash - return p, err -} - -func newFromHash(hashedSecret []byte) (*hashed, error) { - if len(hashedSecret) < minHashSize { - return nil, ErrHashTooShort - } - p := new(hashed) - n, err := p.decodeVersion(hashedSecret) - if err != nil { - return nil, err - } - hashedSecret = hashedSecret[n:] - n, err = p.decodeCost(hashedSecret) - if err != nil { - return nil, err - } - hashedSecret = hashedSecret[n:] - - // The "+2" is here because we'll have to append at most 2 '=' to the salt - // when base64 decoding it in 
expensiveBlowfishSetup(). - p.salt = make([]byte, encodedSaltSize, encodedSaltSize+2) - copy(p.salt, hashedSecret[:encodedSaltSize]) - - hashedSecret = hashedSecret[encodedSaltSize:] - p.hash = make([]byte, len(hashedSecret)) - copy(p.hash, hashedSecret) - - return p, nil -} - -func bcrypt(password []byte, cost int, salt []byte) ([]byte, error) { - cipherData := make([]byte, len(magicCipherData)) - copy(cipherData, magicCipherData) - - c, err := expensiveBlowfishSetup(password, uint32(cost), salt) - if err != nil { - return nil, err - } - - for i := 0; i < 24; i += 8 { - for j := 0; j < 64; j++ { - c.Encrypt(cipherData[i:i+8], cipherData[i:i+8]) - } - } - - // Bug compatibility with C bcrypt implementations. We only encode 23 of - // the 24 bytes encrypted. - hsh := base64Encode(cipherData[:maxCryptedHashSize]) - return hsh, nil -} - -func expensiveBlowfishSetup(key []byte, cost uint32, salt []byte) (*blowfish.Cipher, error) { - csalt, err := base64Decode(salt) - if err != nil { - return nil, err - } - - // Bug compatibility with C bcrypt implementations. They use the trailing - // NULL in the key string during expansion. - // We copy the key to prevent changing the underlying array. 
- ckey := append(key[:len(key):len(key)], 0) - - c, err := blowfish.NewSaltedCipher(ckey, csalt) - if err != nil { - return nil, err - } - - var i, rounds uint64 - rounds = 1 << cost - for i = 0; i < rounds; i++ { - blowfish.ExpandKey(ckey, c) - blowfish.ExpandKey(csalt, c) - } - - return c, nil -} - -func (p *hashed) Hash() []byte { - arr := make([]byte, 60) - arr[0] = '$' - arr[1] = p.major - n := 2 - if p.minor != 0 { - arr[2] = p.minor - n = 3 - } - arr[n] = '$' - n++ - copy(arr[n:], []byte(fmt.Sprintf("%02d", p.cost))) - n += 2 - arr[n] = '$' - n++ - copy(arr[n:], p.salt) - n += encodedSaltSize - copy(arr[n:], p.hash) - n += encodedHashSize - return arr[:n] -} - -func (p *hashed) decodeVersion(sbytes []byte) (int, error) { - if sbytes[0] != '$' { - return -1, InvalidHashPrefixError(sbytes[0]) - } - if sbytes[1] > majorVersion { - return -1, HashVersionTooNewError(sbytes[1]) - } - p.major = sbytes[1] - n := 3 - if sbytes[2] != '$' { - p.minor = sbytes[2] - n++ - } - return n, nil -} - -// sbytes should begin where decodeVersion left off. 
-func (p *hashed) decodeCost(sbytes []byte) (int, error) { - cost, err := strconv.Atoi(string(sbytes[0:2])) - if err != nil { - return -1, err - } - err = checkCost(cost) - if err != nil { - return -1, err - } - p.cost = cost - return 3, nil -} - -func (p *hashed) String() string { - return fmt.Sprintf("&{hash: %#v, salt: %#v, cost: %d, major: %c, minor: %c}", string(p.hash), p.salt, p.cost, p.major, p.minor) -} - -func checkCost(cost int) error { - if cost < MinCost || cost > MaxCost { - return InvalidCostError(cost) - } - return nil -} diff --git a/vendor/google.golang.org/grpc/.travis.yml b/vendor/google.golang.org/grpc/.travis.yml index 819c1e2a9..a11e8cbca 100644 --- a/vendor/google.golang.org/grpc/.travis.yml +++ b/vendor/google.golang.org/grpc/.travis.yml @@ -35,7 +35,7 @@ install: script: - set -e - - if [[ -n "${TESTEXTRAS}" ]]; then examples/examples_test.sh; interop/interop_test.sh; exit 0; fi + - if [[ -n "${TESTEXTRAS}" ]]; then examples/examples_test.sh; interop/interop_test.sh; make testsubmodule; exit 0; fi - if [[ -n "${VET}" ]]; then ./vet.sh; fi - if [[ -n "${GAE}" ]]; then make testappengine; exit 0; fi - if [[ -n "${RACE}" ]]; then make testrace; exit 0; fi diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile index db982aabd..410f7d56d 100644 --- a/vendor/google.golang.org/grpc/Makefile +++ b/vendor/google.golang.org/grpc/Makefile @@ -19,6 +19,9 @@ proto: test: testdeps go test -cpu 1,4 -timeout 7m google.golang.org/grpc/... +testsubmodule: testdeps + cd security/advancedtls && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/advancedtls/... + testappengine: testappenginedeps goapp test -cpu 1,4 -timeout 7m google.golang.org/grpc/... 
diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md index afbc43db5..a54b6f560 100644 --- a/vendor/google.golang.org/grpc/README.md +++ b/vendor/google.golang.org/grpc/README.md @@ -93,6 +93,22 @@ To build Go code, there are several options: #### Compiling error, undefined: grpc.SupportPackageIsVersion +##### If you are using Go modules: + +Please ensure your gRPC-Go version is `require`d at the appropriate version in +the same module containing the generated `.pb.go` files. For example, +`SupportPackageIsVersion6` needs `v1.27.0`, so in your `go.mod` file: + +``` +module + +require ( + google.golang.org/grpc v1.27.0 +) +``` + +##### If you are *not* using Go modules: + Please update proto package, gRPC package and rebuild the proto files: - `go get -u github.com/golang/protobuf/{proto,protoc-gen-go}` - `go get -u google.golang.org/grpc` diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index 531a174a8..9258858ed 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -203,11 +203,6 @@ type ConfigParser interface { ParseConfig(LoadBalancingConfigJSON json.RawMessage) (serviceconfig.LoadBalancingConfig, error) } -// PickOptions is a type alias of PickInfo for legacy reasons. -// -// Deprecated: use PickInfo instead. -type PickOptions = PickInfo - // PickInfo contains additional information for the Pick operation. 
type PickInfo struct { // FullMethodName is the method name that NewClientStream() is called diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go index d952f09f3..80559b80a 100644 --- a/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -21,6 +21,7 @@ package base import ( "context" "errors" + "fmt" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" @@ -76,6 +77,9 @@ type baseBalancer struct { picker balancer.Picker v2Picker balancer.V2Picker config Config + + resolverErr error // the last error reported by the resolver; cleared on successful resolution + connErr error // the last connection error; cleared upon leaving TransientFailure } func (b *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { @@ -83,13 +87,23 @@ func (b *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) } func (b *baseBalancer) ResolverError(err error) { - switch b.state { - case connectivity.TransientFailure, connectivity.Idle, connectivity.Connecting: - if b.picker != nil { - b.picker = NewErrPicker(err) - } else { - b.v2Picker = NewErrPickerV2(err) - } + b.resolverErr = err + if len(b.subConns) == 0 { + b.state = connectivity.TransientFailure + } + if b.state != connectivity.TransientFailure { + // The picker will not change since the balancer does not currently + // report an error. + return + } + b.regeneratePicker() + if b.picker != nil { + b.cc.UpdateBalancerState(b.state, b.picker) + } else { + b.cc.UpdateState(balancer.State{ + ConnectivityState: b.state, + Picker: b.v2Picker, + }) } } @@ -99,6 +113,8 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { if grpclog.V(2) { grpclog.Infoln("base.baseBalancer: got new ClientConn state: ", s) } + // Successful resolution; clear resolver error and ensure we return nil. 
+ b.resolverErr = nil // addrsSet is the set converted from addrs, it's used for quick lookup of an address. addrsSet := make(map[resolver.Address]struct{}) for _, a := range s.ResolverState.Addresses { @@ -124,27 +140,41 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { // The entry will be deleted in HandleSubConnStateChange. } } + // If resolver state contains no addresses, return an error so ClientConn + // will trigger re-resolve. Also records this as an resolver error, so when + // the overall state turns transient failure, the error message will have + // the zero address information. + if len(s.ResolverState.Addresses) == 0 { + b.ResolverError(errors.New("produced zero addresses")) + return balancer.ErrBadResolverState + } return nil } +// mergeErrors builds an error from the last connection error and the last +// resolver error. Must only be called if b.state is TransientFailure. +func (b *baseBalancer) mergeErrors() error { + // connErr must always be non-nil unless there are no SubConns, in which + // case resolverErr must be non-nil. + if b.connErr == nil { + return fmt.Errorf("last resolver error: %v", b.resolverErr) + } + if b.resolverErr == nil { + return fmt.Errorf("last connection error: %v", b.connErr) + } + return fmt.Errorf("last connection error: %v; last resolver error: %v", b.connErr, b.resolverErr) +} + // regeneratePicker takes a snapshot of the balancer, and generates a picker // from it. The picker is -// - errPicker with ErrTransientFailure if the balancer is in TransientFailure, +// - errPicker if the balancer is in TransientFailure, // - built by the pickerBuilder with all READY SubConns otherwise. 
-func (b *baseBalancer) regeneratePicker(err error) { +func (b *baseBalancer) regeneratePicker() { if b.state == connectivity.TransientFailure { if b.pickerBuilder != nil { b.picker = NewErrPicker(balancer.ErrTransientFailure) } else { - if err != nil { - b.v2Picker = NewErrPickerV2(balancer.TransientFailureError(err)) - } else { - // This means the last subchannel transition was not to - // TransientFailure (otherwise err must be set), but the - // aggregate state of the balancer is TransientFailure, meaning - // there are no other addresses. - b.v2Picker = NewErrPickerV2(balancer.TransientFailureError(errors.New("resolver returned no addresses"))) - } + b.v2Picker = NewErrPickerV2(balancer.TransientFailureError(b.mergeErrors())) } return } @@ -187,6 +217,12 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su } return } + if oldS == connectivity.TransientFailure && s == connectivity.Connecting { + // Once a subconn enters TRANSIENT_FAILURE, ignore subsequent + // CONNECTING transitions to prevent the aggregated state from being + // always CONNECTING when many backends exist but are all down. + return + } b.scStates[sc] = s switch s { case connectivity.Idle: @@ -195,19 +231,20 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su // When an address was removed by resolver, b called RemoveSubConn but // kept the sc's state in scStates. Remove state for this sc here. delete(b.scStates, sc) + case connectivity.TransientFailure: + // Save error to be reported via picker. 
+ b.connErr = state.ConnectionError } - oldAggrState := b.state b.state = b.csEvltr.RecordTransition(oldS, s) // Regenerate picker when one of the following happens: - // - this sc became ready from not-ready - // - this sc became not-ready from ready - // - the aggregated state of balancer became TransientFailure from non-TransientFailure - // - the aggregated state of balancer became non-TransientFailure from TransientFailure + // - this sc entered or left ready + // - the aggregated state of balancer is TransientFailure + // (may need to update error message) if (s == connectivity.Ready) != (oldS == connectivity.Ready) || - (b.state == connectivity.TransientFailure) != (oldAggrState == connectivity.TransientFailure) { - b.regeneratePicker(state.ConnectionError) + b.state == connectivity.TransientFailure { + b.regeneratePicker() } if b.picker != nil { diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go index 824f28e74..f8667a23f 100644 --- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go @@ -24,8 +24,8 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/buffer" + "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/resolver" ) @@ -245,7 +245,7 @@ func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { ac, err := cc.newAddrConn(addrs, opts) if err != nil { - grpclog.Warningf("acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err) + channelz.Warningf(acbw.ac.channelzID, "acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err) return } acbw.ac = ac diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 14ce9c76a..293d2f62f 100644 --- 
a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -35,10 +35,10 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/resolver" @@ -151,7 +151,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * if channelz.IsOn() { if cc.dopts.channelzParentID != 0 { cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) - channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{ + channelz.AddTraceEvent(cc.channelzID, 0, &channelz.TraceEventDesc{ Desc: "Channel Created", Severity: channelz.CtINFO, Parent: &channelz.TraceEventDesc{ @@ -161,10 +161,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * }) } else { cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, 0, target) - channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{ - Desc: "Channel Created", - Severity: channelz.CtINFO, - }) + channelz.Info(cc.channelzID, "Channel Created") } cc.csMgr.channelzID = cc.channelzID } @@ -239,25 +236,26 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * if cc.dopts.bs == nil { cc.dopts.bs = backoff.DefaultExponential } - if cc.dopts.resolverBuilder == nil { - // Only try to parse target when resolver builder is not already set. 
- cc.parsedTarget = parseTarget(cc.target) - grpclog.Infof("parsed scheme: %q", cc.parsedTarget.Scheme) - cc.dopts.resolverBuilder = resolver.Get(cc.parsedTarget.Scheme) - if cc.dopts.resolverBuilder == nil { - // If resolver builder is still nil, the parsed target's scheme is - // not registered. Fallback to default resolver and set Endpoint to - // the original target. - grpclog.Infof("scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme) - cc.parsedTarget = resolver.Target{ - Scheme: resolver.GetDefaultScheme(), - Endpoint: target, - } - cc.dopts.resolverBuilder = resolver.Get(cc.parsedTarget.Scheme) + + // Determine the resolver to use. + cc.parsedTarget = grpcutil.ParseTarget(cc.target) + channelz.Infof(cc.channelzID, "parsed scheme: %q", cc.parsedTarget.Scheme) + resolverBuilder := cc.getResolver(cc.parsedTarget.Scheme) + if resolverBuilder == nil { + // If resolver builder is still nil, the parsed target's scheme is + // not registered. Fallback to default resolver and set Endpoint to + // the original target. + channelz.Infof(cc.channelzID, "scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme) + cc.parsedTarget = resolver.Target{ + Scheme: resolver.GetDefaultScheme(), + Endpoint: target, + } + resolverBuilder = cc.getResolver(cc.parsedTarget.Scheme) + if resolverBuilder == nil { + return nil, fmt.Errorf("could not get resolver for default scheme: %q", cc.parsedTarget.Scheme) } - } else { - cc.parsedTarget = resolver.Target{Endpoint: target} } + creds := cc.dopts.copts.TransportCredentials if creds != nil && creds.Info().ServerName != "" { cc.authority = creds.Info().ServerName @@ -297,14 +295,14 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } // Build the resolver. 
- rWrapper, err := newCCResolverWrapper(cc) + rWrapper, err := newCCResolverWrapper(cc, resolverBuilder) if err != nil { return nil, fmt.Errorf("failed to build resolver: %v", err) } - cc.mu.Lock() cc.resolverWrapper = rWrapper cc.mu.Unlock() + // A blocking dial blocks until the clientConn is ready. if cc.dopts.block { for { @@ -415,12 +413,7 @@ func (csm *connectivityStateManager) updateState(state connectivity.State) { return } csm.state = state - if channelz.IsOn() { - channelz.AddTraceEvent(csm.channelzID, &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Channel Connectivity change to %v", state), - Severity: channelz.CtINFO, - }) - } + channelz.Infof(csm.channelzID, "Channel Connectivity change to %v", state) if csm.notifyChan != nil { // There are other goroutines waiting on this channel. close(csm.notifyChan) @@ -443,6 +436,20 @@ func (csm *connectivityStateManager) getNotifyChan() <-chan struct{} { return csm.notifyChan } +// ClientConnInterface defines the functions clients need to perform unary and +// streaming RPCs. It is implemented by *ClientConn, and is only intended to +// be referenced by generated code. +type ClientConnInterface interface { + // Invoke performs a unary RPC and returns after the response is received + // into reply. + Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error + // NewStream begins a streaming RPC. + NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) +} + +// Assert *ClientConn implements ClientConnInterface. +var _ ClientConnInterface = (*ClientConn)(nil) + // ClientConn represents a virtual connection to a conceptual endpoint, to // perform RPCs. 
// @@ -656,9 +663,9 @@ func (cc *ClientConn) switchBalancer(name string) { return } - grpclog.Infof("ClientConn switching balancer to %q", name) + channelz.Infof(cc.channelzID, "ClientConn switching balancer to %q", name) if cc.dopts.balancerBuilder != nil { - grpclog.Infoln("ignoring balancer switching: Balancer DialOption used instead") + channelz.Info(cc.channelzID, "ignoring balancer switching: Balancer DialOption used instead") return } if cc.balancerWrapper != nil { @@ -666,22 +673,12 @@ func (cc *ClientConn) switchBalancer(name string) { } builder := balancer.Get(name) - if channelz.IsOn() { - if builder == nil { - channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName), - Severity: channelz.CtWarning, - }) - } else { - channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Channel switches to new LB policy %q", name), - Severity: channelz.CtINFO, - }) - } - } if builder == nil { - grpclog.Infof("failed to get balancer builder for: %v, using pick_first instead", name) + channelz.Warningf(cc.channelzID, "Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName) + channelz.Infof(cc.channelzID, "failed to get balancer builder for: %v, using pick_first instead", name) builder = newPickfirstBuilder() + } else { + channelz.Infof(cc.channelzID, "Channel switches to new LB policy %q", name) } cc.curBalancerName = builder.Name() @@ -705,6 +702,7 @@ func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivi // Caller needs to make sure len(addrs) > 0. 
func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) { ac := &addrConn{ + state: connectivity.Idle, cc: cc, addrs: addrs, scopts: opts, @@ -721,7 +719,7 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub } if channelz.IsOn() { ac.channelzID = channelz.RegisterSubChannel(ac, cc.channelzID, "") - channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ + channelz.AddTraceEvent(ac.channelzID, 0, &channelz.TraceEventDesc{ Desc: "Subchannel Created", Severity: channelz.CtINFO, Parent: &channelz.TraceEventDesc{ @@ -819,7 +817,7 @@ func (ac *addrConn) connect() error { func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { ac.mu.Lock() defer ac.mu.Unlock() - grpclog.Infof("addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs) + channelz.Infof(ac.channelzID, "addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs) if ac.state == connectivity.Shutdown || ac.state == connectivity.TransientFailure || ac.state == connectivity.Idle { @@ -839,7 +837,7 @@ func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { break } } - grpclog.Infof("addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound) + channelz.Infof(ac.channelzID, "addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound) if curAddrFound { ac.addrs = addrs } @@ -1010,7 +1008,7 @@ func (cc *ClientConn) Close() error { Severity: channelz.CtINFO, } } - channelz.AddTraceEvent(cc.channelzID, ted) + channelz.AddTraceEvent(cc.channelzID, 0, ted) // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to // the entity being deleted, and thus prevent it from being deleted right away. 
channelz.RemoveEntry(cc.channelzID) @@ -1053,15 +1051,8 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) if ac.state == s { return } - - updateMsg := fmt.Sprintf("Subchannel Connectivity change to %v", s) ac.state = s - if channelz.IsOn() { - channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ - Desc: updateMsg, - Severity: channelz.CtINFO, - }) - } + channelz.Infof(ac.channelzID, "Subchannel Connectivity change to %v", s) ac.cc.handleSubConnStateChange(ac.acbw, s, lastErr) } @@ -1198,12 +1189,7 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T } ac.mu.Unlock() - if channelz.IsOn() { - channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Subchannel picks a new address %q to connect", addr.Addr), - Severity: channelz.CtINFO, - }) - } + channelz.Infof(ac.channelzID, "Subchannel picks a new address %q to connect", addr.Addr) newTr, reconnect, err := ac.createTransport(addr, copts, connectDeadline) if err == nil { @@ -1285,7 +1271,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, target, copts, onPrefaceReceipt, onGoAway, onClose) if err != nil { // newTr is either nil, or closed. - grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v. Err :%v. Reconnecting...", addr, err) + channelz.Warningf(ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v. Err: %v. Reconnecting...", addr, err) return nil, nil, err } @@ -1293,7 +1279,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne case <-time.After(time.Until(connectDeadline)): // We didn't get the preface in time. newTr.Close() - grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. 
Reconnecting...", addr) + channelz.Warningf(ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr) return nil, nil, errors.New("timed out waiting for server handshake") case <-prefaceReceived: // We got the preface - huzzah! things are good. @@ -1340,7 +1326,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { // The health package is not imported to set health check function. // // TODO: add a link to the health check doc in the error message. - grpclog.Error("Health check is requested but health check function is not set.") + channelz.Error(ac.channelzID, "Health check is requested but health check function is not set.") return } @@ -1370,15 +1356,9 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { err := ac.cc.dopts.healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName) if err != nil { if status.Code(err) == codes.Unimplemented { - if channelz.IsOn() { - channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ - Desc: "Subchannel health check is unimplemented at server side, thus health check is disabled", - Severity: channelz.CtError, - }) - } - grpclog.Error("Subchannel health check is unimplemented at server side, thus health check is disabled") + channelz.Error(ac.channelzID, "Subchannel health check is unimplemented at server side, thus health check is disabled") } else { - grpclog.Errorf("HealthCheckFunc exits with unexpected error %v", err) + channelz.Errorf(ac.channelzID, "HealthCheckFunc exits with unexpected error %v", err) } } }() @@ -1443,7 +1423,7 @@ func (ac *addrConn) tearDown(err error) { ac.mu.Lock() } if channelz.IsOn() { - channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{ + channelz.AddTraceEvent(ac.channelzID, 0, &channelz.TraceEventDesc{ Desc: "Subchannel Deleted", Severity: channelz.CtINFO, Parent: &channelz.TraceEventDesc{ @@ -1542,3 +1522,12 @@ func (c *channelzChannel) 
ChannelzMetric() *channelz.ChannelInternalMetric { // Deprecated: This error is never returned by grpc and should not be // referenced by users. var ErrClientConnTimeout = errors.New("grpc: timed out when dialing") + +func (cc *ClientConn) getResolver(scheme string) resolver.Builder { + for _, rb := range cc.dopts.resolvers { + if cc.parsedTarget.Scheme == rb.Scheme() { + return rb + } + } + return resolver.Get(cc.parsedTarget.Scheme) +} diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go index 667cf6b33..e438fda22 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -25,6 +25,7 @@ package credentials // import "google.golang.org/grpc/credentials" import ( "context" "errors" + "fmt" "net" "github.com/golang/protobuf/proto" @@ -50,6 +51,48 @@ type PerRPCCredentials interface { RequireTransportSecurity() bool } +// SecurityLevel defines the protection level on an established connection. +// +// This API is experimental. +type SecurityLevel int + +const ( + // NoSecurity indicates a connection is insecure. + // The zero SecurityLevel value is invalid for backward compatibility. + NoSecurity SecurityLevel = iota + 1 + // IntegrityOnly indicates a connection only provides integrity protection. + IntegrityOnly + // PrivacyAndIntegrity indicates a connection provides both privacy and integrity protection. + PrivacyAndIntegrity +) + +// String returns SecurityLevel in a string format. +func (s SecurityLevel) String() string { + switch s { + case NoSecurity: + return "NoSecurity" + case IntegrityOnly: + return "IntegrityOnly" + case PrivacyAndIntegrity: + return "PrivacyAndIntegrity" + } + return fmt.Sprintf("invalid SecurityLevel: %v", int(s)) +} + +// CommonAuthInfo contains authenticated information common to AuthInfo implementations. 
+// It should be embedded in a struct implementing AuthInfo to provide additional information +// about the credentials. +// +// This API is experimental. +type CommonAuthInfo struct { + SecurityLevel SecurityLevel +} + +// GetCommonAuthInfo returns the pointer to CommonAuthInfo struct. +func (c *CommonAuthInfo) GetCommonAuthInfo() *CommonAuthInfo { + return c +} + // ProtocolInfo provides information regarding the gRPC wire protocol version, // security protocol, security protocol version in use, server name, etc. type ProtocolInfo struct { @@ -57,13 +100,19 @@ type ProtocolInfo struct { ProtocolVersion string // SecurityProtocol is the security protocol in use. SecurityProtocol string - // SecurityVersion is the security protocol version. + // SecurityVersion is the security protocol version. It is a static version string from the + // credentials, not a value that reflects per-connection protocol negotiation. To retrieve + // details about the credentials used for a connection, use the Peer's AuthInfo field instead. + // + // Deprecated: please use Peer.AuthInfo. SecurityVersion string // ServerName is the user-configured server name. ServerName string } // AuthInfo defines the common interface for the auth information the users are interested in. +// A struct that implements AuthInfo should embed CommonAuthInfo by including additional +// information about the credentials in it. type AuthInfo interface { AuthType() string } @@ -78,7 +127,8 @@ type TransportCredentials interface { // ClientHandshake does the authentication handshake specified by the corresponding // authentication protocol on rawConn for clients. It returns the authenticated // connection and the corresponding auth information about the connection. - // Implementations must use the provided context to implement timely cancellation. + // The auth information should embed CommonAuthInfo to return additional information about + // the credentials. 
Implementations must use the provided context to implement timely cancellation. // gRPC will try to reconnect if the error returned is a temporary error // (io.EOF, context.DeadlineExceeded or err.Temporary() == true). // If the returned error is a wrapper error, implementations should make sure that @@ -88,7 +138,8 @@ type TransportCredentials interface { ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error) // ServerHandshake does the authentication handshake for servers. It returns // the authenticated connection and the corresponding auth information about - // the connection. + // the connection. The auth information should embed CommonAuthInfo to return additional information + // about the credentials. // // If the returned net.Conn is closed, it MUST close the net.Conn provided. ServerHandshake(net.Conn) (net.Conn, AuthInfo, error) @@ -127,6 +178,8 @@ type Bundle interface { type RequestInfo struct { // The method passed to Invoke or NewStream for this RPC. (For proto methods, this has the format "/some.Service/Method") Method string + // AuthInfo contains the information from a security handshake (TransportCredentials.ClientHandshake, TransportCredentials.ServerHandshake) + AuthInfo AuthInfo } // requestInfoKey is a struct to be used as the key when attaching a RequestInfo to a context object. @@ -140,6 +193,32 @@ func RequestInfoFromContext(ctx context.Context) (ri RequestInfo, ok bool) { return } +// CheckSecurityLevel checks if a connection's security level is greater than or equal to the specified one. +// It returns success if 1) the condition is satisified or 2) AuthInfo struct does not implement GetCommonAuthInfo() method +// or 3) CommonAuthInfo.SecurityLevel has an invalid zero value. For 2) and 3), it is for the purpose of backward-compatibility. +// +// This API is experimental. 
+func CheckSecurityLevel(ctx context.Context, level SecurityLevel) error { + type internalInfo interface { + GetCommonAuthInfo() *CommonAuthInfo + } + ri, _ := RequestInfoFromContext(ctx) + if ri.AuthInfo == nil { + return errors.New("unable to obtain SecurityLevel from context") + } + if ci, ok := ri.AuthInfo.(internalInfo); ok { + // CommonAuthInfo.SecurityLevel has an invalid value. + if ci.GetCommonAuthInfo().SecurityLevel == 0 { + return nil + } + if ci.GetCommonAuthInfo().SecurityLevel < level { + return fmt.Errorf("requires SecurityLevel %v; connection has %v", level, ci.GetCommonAuthInfo().SecurityLevel) + } + } + // The condition is satisfied or AuthInfo struct does not implement GetCommonAuthInfo() method. + return nil +} + func init() { internal.NewRequestInfoContext = func(ctx context.Context, ri RequestInfo) context.Context { return context.WithValue(ctx, requestInfoKey{}, ri) diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go index 7c3361368..28b4f6232 100644 --- a/vendor/google.golang.org/grpc/credentials/tls.go +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -33,6 +33,7 @@ import ( // It implements the AuthInfo interface. type TLSInfo struct { State tls.ConnectionState + CommonAuthInfo } // AuthType returns the type of TLSInfo as a string. 
@@ -81,24 +82,28 @@ func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawCon errChannel := make(chan error, 1) go func() { errChannel <- conn.Handshake() + close(errChannel) }() select { case err := <-errChannel: if err != nil { + conn.Close() return nil, nil, err } case <-ctx.Done(): + conn.Close() return nil, nil, ctx.Err() } - return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState()}, nil + return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState(), CommonAuthInfo{PrivacyAndIntegrity}}, nil } func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) { conn := tls.Server(rawConn, c.config) if err := conn.Handshake(); err != nil { + conn.Close() return nil, nil, err } - return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState()}, nil + return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState(), CommonAuthInfo{PrivacyAndIntegrity}}, nil } func (c *tlsCreds) Clone() TransportCredentials { diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index 9af3eef7a..63f5ae21d 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -58,9 +58,7 @@ type dialOptions struct { callOptions []CallOption // This is used by v1 balancer dial option WithBalancer to support v1 // balancer, and also by WithBalancerName dial option. - balancerBuilder balancer.Builder - // This is to support grpclb. - resolverBuilder resolver.Builder + balancerBuilder balancer.Builder channelzParentID int64 disableServiceConfig bool disableRetry bool @@ -73,6 +71,7 @@ type dialOptions struct { // resolver.ResolveNow(). The user will have no need to configure this, but // we need to be able to configure this in tests. resolveNowBackoff func(int) time.Duration + resolvers []resolver.Builder } // DialOption configures how we set up the connection. 
@@ -231,13 +230,6 @@ func WithBalancerName(balancerName string) DialOption { }) } -// withResolverBuilder is only for grpclb. -func withResolverBuilder(b resolver.Builder) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.resolverBuilder = b - }) -} - // WithServiceConfig returns a DialOption which has a channel to read the // service configuration. // @@ -365,7 +357,6 @@ func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOp } func init() { - internal.WithResolverBuilder = withResolverBuilder internal.WithHealthCheckFunc = withHealthCheckFunc } @@ -589,3 +580,15 @@ func withResolveNowBackoff(f func(int) time.Duration) DialOption { o.resolveNowBackoff = f }) } + +// WithResolvers allows a list of resolver implementations to be registered +// locally with the ClientConn without needing to be globally registered via +// resolver.Register. They will be matched against the scheme used for the +// current Dial only, and will take precedence over the global registry. +// +// This API is EXPERIMENTAL. +func WithResolvers(rs ...resolver.Builder) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.resolvers = append(o.resolvers, rs...) 
+ }) +} diff --git a/vendor/google.golang.org/grpc/go.mod b/vendor/google.golang.org/grpc/go.mod index 237836130..84c98c29f 100644 --- a/vendor/google.golang.org/grpc/go.mod +++ b/vendor/google.golang.org/grpc/go.mod @@ -3,11 +3,11 @@ module google.golang.org/grpc go 1.11 require ( - github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473 + github.com/envoyproxy/go-control-plane v0.9.4 github.com/envoyproxy/protoc-gen-validate v0.1.0 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b github.com/golang/mock v1.1.1 - github.com/golang/protobuf v1.3.2 + github.com/golang/protobuf v1.3.3 github.com/google/go-cmp v0.2.0 golang.org/x/net v0.0.0-20190311183353-d8887717615a golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be diff --git a/vendor/google.golang.org/grpc/go.sum b/vendor/google.golang.org/grpc/go.sum index dd5d0cee7..0bf9f0747 100644 --- a/vendor/google.golang.org/grpc/go.sum +++ b/vendor/google.golang.org/grpc/go.sum @@ -1,10 +1,15 @@ cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473 h1:4cmBvAEBNJaGARUEs3/suWRyfyBfhf7I60WBZq+bv2w= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f h1:WBZRG4aNOuI15bLRrCgN8fCq8E5Xuty6jGbmSNEvSsU= +github.com/cncf/udpa/go 
v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4 h1:rEvIZUSZ3fx39WIi3JkQqQBitGwpELBIYWeBVh6wn+E= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= @@ -14,6 +19,8 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -21,6 +28,7 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 
h1:XQyxROzUlZH+WIQwySDgnISgOivlhjIEwaQaJEJrrN0= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -40,6 +48,7 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135 h1:5Beo0mZN8dRzgrMMkDp0jc8YXQKx9DiJ2k1dkvGsn5A= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= @@ -49,5 +58,7 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2El google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc h1:/hemPrYIhOhy8zYrNj+069zDB68us2sMGsfkFJO0iZs= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git 
a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go index 874ea6d98..c8bb2be34 100644 --- a/vendor/google.golang.org/grpc/grpclog/grpclog.go +++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go @@ -26,64 +26,70 @@ // verbosity level can be set by GRPC_GO_LOG_VERBOSITY_LEVEL. package grpclog // import "google.golang.org/grpc/grpclog" -import "os" +import ( + "os" -var logger = newLoggerV2() + "google.golang.org/grpc/internal/grpclog" +) + +func init() { + SetLoggerV2(newLoggerV2()) +} // V reports whether verbosity level l is at least the requested verbose level. func V(l int) bool { - return logger.V(l) + return grpclog.Logger.V(l) } // Info logs to the INFO log. func Info(args ...interface{}) { - logger.Info(args...) + grpclog.Logger.Info(args...) } // Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf. func Infof(format string, args ...interface{}) { - logger.Infof(format, args...) + grpclog.Logger.Infof(format, args...) } // Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println. func Infoln(args ...interface{}) { - logger.Infoln(args...) + grpclog.Logger.Infoln(args...) } // Warning logs to the WARNING log. func Warning(args ...interface{}) { - logger.Warning(args...) + grpclog.Logger.Warning(args...) } // Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf. func Warningf(format string, args ...interface{}) { - logger.Warningf(format, args...) + grpclog.Logger.Warningf(format, args...) } // Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println. func Warningln(args ...interface{}) { - logger.Warningln(args...) + grpclog.Logger.Warningln(args...) } // Error logs to the ERROR log. func Error(args ...interface{}) { - logger.Error(args...) + grpclog.Logger.Error(args...) } // Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf. 
func Errorf(format string, args ...interface{}) { - logger.Errorf(format, args...) + grpclog.Logger.Errorf(format, args...) } // Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println. func Errorln(args ...interface{}) { - logger.Errorln(args...) + grpclog.Logger.Errorln(args...) } // Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print. // It calls os.Exit() with exit code 1. func Fatal(args ...interface{}) { - logger.Fatal(args...) + grpclog.Logger.Fatal(args...) // Make sure fatal logs will exit. os.Exit(1) } @@ -91,7 +97,7 @@ func Fatal(args ...interface{}) { // Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf. // It calls os.Exit() with exit code 1. func Fatalf(format string, args ...interface{}) { - logger.Fatalf(format, args...) + grpclog.Logger.Fatalf(format, args...) // Make sure fatal logs will exit. os.Exit(1) } @@ -99,7 +105,7 @@ func Fatalf(format string, args ...interface{}) { // Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println. // It calle os.Exit()) with exit code 1. func Fatalln(args ...interface{}) { - logger.Fatalln(args...) + grpclog.Logger.Fatalln(args...) // Make sure fatal logs will exit. os.Exit(1) } @@ -108,19 +114,19 @@ func Fatalln(args ...interface{}) { // // Deprecated: use Info. func Print(args ...interface{}) { - logger.Info(args...) + grpclog.Logger.Info(args...) } // Printf prints to the logger. Arguments are handled in the manner of fmt.Printf. // // Deprecated: use Infof. func Printf(format string, args ...interface{}) { - logger.Infof(format, args...) + grpclog.Logger.Infof(format, args...) } // Println prints to the logger. Arguments are handled in the manner of fmt.Println. // // Deprecated: use Infoln. func Println(args ...interface{}) { - logger.Infoln(args...) + grpclog.Logger.Infoln(args...) 
} diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go index 097494f71..ef06a4822 100644 --- a/vendor/google.golang.org/grpc/grpclog/logger.go +++ b/vendor/google.golang.org/grpc/grpclog/logger.go @@ -18,6 +18,8 @@ package grpclog +import "google.golang.org/grpc/internal/grpclog" + // Logger mimics golang's standard Logger as an interface. // // Deprecated: use LoggerV2. @@ -35,7 +37,7 @@ type Logger interface { // // Deprecated: use SetLoggerV2. func SetLogger(l Logger) { - logger = &loggerWrapper{Logger: l} + grpclog.Logger = &loggerWrapper{Logger: l} } // loggerWrapper wraps Logger into a LoggerV2. diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go index d49325776..23612b7c4 100644 --- a/vendor/google.golang.org/grpc/grpclog/loggerv2.go +++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -24,6 +24,8 @@ import ( "log" "os" "strconv" + + "google.golang.org/grpc/internal/grpclog" ) // LoggerV2 does underlying logging work for grpclog. @@ -65,7 +67,8 @@ type LoggerV2 interface { // SetLoggerV2 sets logger that is used in grpc to a V2 logger. // Not mutex-protected, should be called before any gRPC functions. func SetLoggerV2(l LoggerV2) { - logger = l + grpclog.Logger = l + grpclog.DepthLogger, _ = l.(grpclog.DepthLoggerV2) } const ( @@ -193,3 +196,19 @@ func (g *loggerT) Fatalf(format string, args ...interface{}) { func (g *loggerT) V(l int) bool { return l <= g.v } + +// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements +// DepthLoggerV2, the below functions will be called with the appropriate stack +// depth set for trivial functions the logger may ignore. +// +// This API is EXPERIMENTAL. +type DepthLoggerV2 interface { + // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print. 
+ InfoDepth(depth int, args ...interface{}) + // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print. + WarningDepth(depth int, args ...interface{}) + // ErrorDetph logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Print. + ErrorDepth(depth int, args ...interface{}) + // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Print. + FatalDepth(depth int, args ...interface{}) +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go index f0744f993..e4252e5be 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -30,7 +30,7 @@ import ( "sync/atomic" "time" - "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/grpclog" ) const ( @@ -216,7 +216,7 @@ func RegisterChannel(c Channel, pid int64, ref string) int64 { // by pid). It returns the unique channelz tracking id assigned to this subchannel. func RegisterSubChannel(c Channel, pid int64, ref string) int64 { if pid == 0 { - grpclog.Error("a SubChannel's parent id cannot be 0") + grpclog.ErrorDepth(0, "a SubChannel's parent id cannot be 0") return 0 } id := idGen.genID() @@ -253,7 +253,7 @@ func RegisterServer(s Server, ref string) int64 { // this listen socket. func RegisterListenSocket(s Socket, pid int64, ref string) int64 { if pid == 0 { - grpclog.Error("a ListenSocket's parent id cannot be 0") + grpclog.ErrorDepth(0, "a ListenSocket's parent id cannot be 0") return 0 } id := idGen.genID() @@ -268,7 +268,7 @@ func RegisterListenSocket(s Socket, pid int64, ref string) int64 { // this normal socket. 
func RegisterNormalSocket(s Socket, pid int64, ref string) int64 { if pid == 0 { - grpclog.Error("a NormalSocket's parent id cannot be 0") + grpclog.ErrorDepth(0, "a NormalSocket's parent id cannot be 0") return 0 } id := idGen.genID() @@ -294,7 +294,19 @@ type TraceEventDesc struct { } // AddTraceEvent adds trace related to the entity with specified id, using the provided TraceEventDesc. -func AddTraceEvent(id int64, desc *TraceEventDesc) { +func AddTraceEvent(id int64, depth int, desc *TraceEventDesc) { + for d := desc; d != nil; d = d.Parent { + switch d.Severity { + case CtUNKNOWN: + grpclog.InfoDepth(depth+1, d.Desc) + case CtINFO: + grpclog.InfoDepth(depth+1, d.Desc) + case CtWarning: + grpclog.WarningDepth(depth+1, d.Desc) + case CtError: + grpclog.ErrorDepth(depth+1, d.Desc) + } + } if getMaxTraceEntry() == 0 { return } diff --git a/vendor/google.golang.org/grpc/internal/channelz/logging.go b/vendor/google.golang.org/grpc/internal/channelz/logging.go new file mode 100644 index 000000000..59c7bedec --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/logging.go @@ -0,0 +1,100 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import ( + "fmt" + + "google.golang.org/grpc/internal/grpclog" +) + +// Info logs through grpclog.Info and adds a trace event if channelz is on. 
+func Info(id int64, args ...interface{}) { + if IsOn() { + AddTraceEvent(id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtINFO, + }) + } else { + grpclog.InfoDepth(1, args...) + } +} + +// Infof logs through grpclog.Infof and adds a trace event if channelz is on. +func Infof(id int64, format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + if IsOn() { + AddTraceEvent(id, 1, &TraceEventDesc{ + Desc: msg, + Severity: CtINFO, + }) + } else { + grpclog.InfoDepth(1, msg) + } +} + +// Warning logs through grpclog.Warning and adds a trace event if channelz is on. +func Warning(id int64, args ...interface{}) { + if IsOn() { + AddTraceEvent(id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtWarning, + }) + } else { + grpclog.WarningDepth(1, args...) + } +} + +// Warningf logs through grpclog.Warningf and adds a trace event if channelz is on. +func Warningf(id int64, format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + if IsOn() { + AddTraceEvent(id, 1, &TraceEventDesc{ + Desc: msg, + Severity: CtWarning, + }) + } else { + grpclog.WarningDepth(1, msg) + } +} + +// Error logs through grpclog.Error and adds a trace event if channelz is on. +func Error(id int64, args ...interface{}) { + if IsOn() { + AddTraceEvent(id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtError, + }) + } else { + grpclog.ErrorDepth(1, args...) + } +} + +// Errorf logs through grpclog.Errorf and adds a trace event if channelz is on. +func Errorf(id int64, format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) 
+ if IsOn() { + AddTraceEvent(id, 1, &TraceEventDesc{ + Desc: msg, + Severity: CtError, + }) + } else { + grpclog.ErrorDepth(1, msg) + } +} diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 3ee8740f1..ae6c8972f 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -25,11 +25,14 @@ import ( ) const ( - prefix = "GRPC_GO_" - retryStr = prefix + "RETRY" + prefix = "GRPC_GO_" + retryStr = prefix + "RETRY" + txtErrIgnoreStr = prefix + "IGNORE_TXT_ERRORS" ) var ( // Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on". Retry = strings.EqualFold(os.Getenv(retryStr), "on") + // TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). + TXTErrIgnore = !strings.EqualFold(os.Getenv(retryStr), "false") ) diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go new file mode 100644 index 000000000..8c8e19fce --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go @@ -0,0 +1,118 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpclog (internal) defines depth logging for grpc. +package grpclog + +// Logger is the logger used for the non-depth log functions. 
+var Logger LoggerV2 + +// DepthLogger is the logger used for the depth log functions. +var DepthLogger DepthLoggerV2 + +// InfoDepth logs to the INFO log at the specified depth. +func InfoDepth(depth int, args ...interface{}) { + if DepthLogger != nil { + DepthLogger.InfoDepth(depth, args...) + } else { + Logger.Info(args...) + } +} + +// WarningDepth logs to the WARNING log at the specified depth. +func WarningDepth(depth int, args ...interface{}) { + if DepthLogger != nil { + DepthLogger.WarningDepth(depth, args...) + } else { + Logger.Warning(args...) + } +} + +// ErrorDepth logs to the ERROR log at the specified depth. +func ErrorDepth(depth int, args ...interface{}) { + if DepthLogger != nil { + DepthLogger.ErrorDepth(depth, args...) + } else { + Logger.Error(args...) + } +} + +// FatalDepth logs to the FATAL log at the specified depth. +func FatalDepth(depth int, args ...interface{}) { + if DepthLogger != nil { + DepthLogger.FatalDepth(depth, args...) + } else { + Logger.Fatal(args...) + } +} + +// LoggerV2 does underlying logging work for grpclog. +// This is a copy of the LoggerV2 defined in the external grpclog package. It +// is defined here to avoid a circular dependency. +type LoggerV2 interface { + // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. + Info(args ...interface{}) + // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. + Infoln(args ...interface{}) + // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. + Infof(format string, args ...interface{}) + // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. + Warning(args ...interface{}) + // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. + Warningln(args ...interface{}) + // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. + Warningf(format string, args ...interface{}) + // Error logs to ERROR log. 
Arguments are handled in the manner of fmt.Print. + Error(args ...interface{}) + // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + Errorln(args ...interface{}) + // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + Errorf(format string, args ...interface{}) + // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatal(args ...interface{}) + // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalln(args ...interface{}) + // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalf(format string, args ...interface{}) + // V reports whether verbosity level l is at least the requested verbose level. + V(l int) bool +} + +// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements +// DepthLoggerV2, the below functions will be called with the appropriate stack +// depth set for trivial functions the logger may ignore. +// This is a copy of the DepthLoggerV2 defined in the external grpclog package. +// It is defined here to avoid a circular dependency. +// +// This API is EXPERIMENTAL. +type DepthLoggerV2 interface { + // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print. + InfoDepth(depth int, args ...interface{}) + // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print. + WarningDepth(depth int, args ...interface{}) + // ErrorDetph logs to ERROR log at the specified depth. 
Arguments are handled in the manner of fmt.Print. + ErrorDepth(depth int, args ...interface{}) + // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Print. + FatalDepth(depth int, args ...interface{}) +} diff --git a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go new file mode 100644 index 000000000..f6e0dc1da --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go @@ -0,0 +1,63 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclog + +// PrefixLogger does logging with a prefix. +// +// Logging method on a nil logs without any prefix. +type PrefixLogger struct { + prefix string +} + +// Infof does info logging. +func (pl *PrefixLogger) Infof(format string, args ...interface{}) { + if pl != nil { + // Handle nil, so the tests can pass in a nil logger. + format = pl.prefix + format + } + Logger.Infof(format, args...) +} + +// Warningf does warning logging. +func (pl *PrefixLogger) Warningf(format string, args ...interface{}) { + if pl != nil { + format = pl.prefix + format + } + Logger.Warningf(format, args...) +} + +// Errorf does error logging. +func (pl *PrefixLogger) Errorf(format string, args ...interface{}) { + if pl != nil { + format = pl.prefix + format + } + Logger.Errorf(format, args...) 
+} + +// Debugf does info logging at verbose level 2. +func (pl *PrefixLogger) Debugf(format string, args ...interface{}) { + if Logger.V(2) { + pl.Infof(format, args...) + } +} + +// NewPrefixLogger creates a prefix logger with the given prefix. +func NewPrefixLogger(prefix string) *PrefixLogger { + return &PrefixLogger{prefix: prefix} +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/target.go b/vendor/google.golang.org/grpc/internal/grpcutil/target.go new file mode 100644 index 000000000..80b33cdaf --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/target.go @@ -0,0 +1,55 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpcutil provides a bunch of utility functions to be used across the +// gRPC codebase. +package grpcutil + +import ( + "strings" + + "google.golang.org/grpc/resolver" +) + +// split2 returns the values from strings.SplitN(s, sep, 2). +// If sep is not found, it returns ("", "", false) instead. +func split2(s, sep string) (string, string, bool) { + spl := strings.SplitN(s, sep, 2) + if len(spl) < 2 { + return "", "", false + } + return spl[0], spl[1], true +} + +// ParseTarget splits target into a resolver.Target struct containing scheme, +// authority and endpoint. +// +// If target is not a valid scheme://authority/endpoint, it returns {Endpoint: +// target}. 
+func ParseTarget(target string) (ret resolver.Target) { + var ok bool + ret.Scheme, ret.Endpoint, ok = split2(target, "://") + if !ok { + return resolver.Target{Endpoint: target} + } + ret.Authority, ret.Endpoint, ok = split2(ret.Endpoint, "/") + if !ok { + return resolver.Target{Endpoint: target} + } + return ret +} diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index eae18e18c..0912f0bf4 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -28,8 +28,6 @@ import ( ) var ( - // WithResolverBuilder is set by dialoptions.go - WithResolverBuilder interface{} // func (resolver.Builder) grpc.DialOption // WithHealthCheckFunc is set by dialoptions.go WithHealthCheckFunc interface{} // func (HealthChecker) DialOption // HealthCheckFunc is used to provide client-side LB channel health checking diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index 7705ca22e..c368db62e 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -33,6 +33,7 @@ import ( "time" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" @@ -204,8 +205,12 @@ func (d *dnsResolver) watcher() { case <-d.rn: } - state := d.lookup() - d.cc.UpdateState(*state) + state, err := d.lookup() + if err != nil { + d.cc.ReportError(err) + } else { + d.cc.UpdateState(*state) + } // Sleep to prevent excessive re-resolutions. Incoming resolution requests // will be queued in d.rn. 
@@ -219,33 +224,37 @@ func (d *dnsResolver) watcher() { } } -func (d *dnsResolver) lookupSRV() []resolver.Address { +func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) { if !EnableSRVLookups { - return nil + return nil, nil } var newAddrs []resolver.Address _, srvs, err := d.resolver.LookupSRV(d.ctx, "grpclb", "tcp", d.host) if err != nil { - grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err) - return nil + err = handleDNSError(err, "SRV") // may become nil + return nil, err } for _, s := range srvs { lbAddrs, err := d.resolver.LookupHost(d.ctx, s.Target) if err != nil { - grpclog.Infof("grpc: failed load balancer address dns lookup due to %v.\n", err) - continue + err = handleDNSError(err, "A") // may become nil + if err == nil { + // If there are other SRV records, look them up and ignore this + // one that does not exist. + continue + } + return nil, err } for _, a := range lbAddrs { - a, ok := formatIP(a) + ip, ok := formatIP(a) if !ok { - grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) - continue + return nil, fmt.Errorf("dns: error parsing A record IP address %v", a) } - addr := a + ":" + strconv.Itoa(int(s.Port)) + addr := ip + ":" + strconv.Itoa(int(s.Port)) newAddrs = append(newAddrs, resolver.Address{Addr: addr, Type: resolver.GRPCLB, ServerName: s.Target}) } } - return newAddrs + return newAddrs, nil } var filterError = func(err error) error { @@ -258,13 +267,22 @@ var filterError = func(err error) error { return err } +func handleDNSError(err error, lookupType string) error { + err = filterError(err) + if err != nil { + err = fmt.Errorf("dns: %v record lookup error: %v", lookupType, err) + grpclog.Infoln(err) + } + return err +} + func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult { ss, err := d.resolver.LookupTXT(d.ctx, txtPrefix+d.host) if err != nil { - err = filterError(err) - if err != nil { - err = fmt.Errorf("error from DNS TXT record lookup: %v", err) - grpclog.Infoln("grpc:", err) + if 
envconfig.TXTErrIgnore { + return nil + } + if err = handleDNSError(err, "TXT"); err != nil { return &serviceconfig.ParseResult{Err: err} } return nil @@ -276,7 +294,7 @@ func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult { // TXT record must have "grpc_config=" attribute in order to be used as service config. if !strings.HasPrefix(res, txtAttribute) { - grpclog.Warningf("grpc: DNS TXT record %v missing %v attribute", res, txtAttribute) + grpclog.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute) // This is not an error; it is the equivalent of not having a service config. return nil } @@ -284,34 +302,37 @@ func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult { return d.cc.ParseServiceConfig(sc) } -func (d *dnsResolver) lookupHost() []resolver.Address { +func (d *dnsResolver) lookupHost() ([]resolver.Address, error) { var newAddrs []resolver.Address addrs, err := d.resolver.LookupHost(d.ctx, d.host) if err != nil { - grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err) - return nil + err = handleDNSError(err, "A") + return nil, err } for _, a := range addrs { - a, ok := formatIP(a) + ip, ok := formatIP(a) if !ok { - grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) - continue + return nil, fmt.Errorf("dns: error parsing A record IP address %v", a) } - addr := a + ":" + d.port + addr := ip + ":" + d.port newAddrs = append(newAddrs, resolver.Address{Addr: addr}) } - return newAddrs + return newAddrs, nil } -func (d *dnsResolver) lookup() *resolver.State { - srv := d.lookupSRV() +func (d *dnsResolver) lookup() (*resolver.State, error) { + srv, srvErr := d.lookupSRV() + addrs, hostErr := d.lookupHost() + if hostErr != nil && (srvErr != nil || len(srv) == 0) { + return nil, hostErr + } state := &resolver.State{ - Addresses: append(d.lookupHost(), srv...), + Addresses: append(addrs, srv...), } if !d.disableServiceConfig { state.ServiceConfig = d.lookupTXT() } - return state + return state, nil } // 
formatIP returns ok = false if addr is not a valid textual representation of an IP address. @@ -397,12 +418,12 @@ func canaryingSC(js string) string { var rcs []rawChoice err := json.Unmarshal([]byte(js), &rcs) if err != nil { - grpclog.Warningf("grpc: failed to parse service config json string due to %v.\n", err) + grpclog.Warningf("dns: error parsing service config json: %v", err) return "" } cliHostname, err := os.Hostname() if err != nil { - grpclog.Warningf("grpc: failed to get client hostname due to %v.\n", err) + grpclog.Warningf("dns: error getting client hostname: %v", err) return "" } var sc string diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index fbf01d5fe..228b82184 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -338,7 +338,7 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace Addr: ht.RemoteAddr(), } if req.TLS != nil { - pr.AuthInfo = credentials.TLSInfo{State: *req.TLS} + pr.AuthInfo = credentials.TLSInfo{State: *req.TLS, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}} } ctx = metadata.NewIncomingContext(ctx, ht.headerMD) s.ctx = peer.NewContext(ctx, pr) diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index e18935653..b1b82ec95 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -403,7 +403,8 @@ func (t *http2Client) getPeer() *peer.Peer { func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) ([]hpack.HeaderField, error) { aud := t.createAudience(callHdr) ri := credentials.RequestInfo{ - Method: callHdr.Method, + Method: callHdr.Method, + 
AuthInfo: t.authInfo, } ctxWithRequestInfo := internal.NewRequestInfoContext.(func(context.Context, credentials.RequestInfo) context.Context)(ctx, ri) authData, err := t.getTrAuthData(ctxWithRequestInfo, aud) @@ -679,14 +680,19 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea } } if t.statsHandler != nil { - header, _, _ := metadata.FromOutgoingContextRaw(ctx) + header, ok := metadata.FromOutgoingContext(ctx) + if ok { + header.Set("user-agent", t.userAgent) + } else { + header = metadata.Pairs("user-agent", t.userAgent) + } outHeader := &stats.OutHeader{ Client: true, FullMethod: callHdr.Method, RemoteAddr: t.remoteAddr, LocalAddr: t.localAddr, Compression: callHdr.SendCompress, - Header: header.Copy(), + Header: header, } t.statsHandler.HandleRPC(s.ctx, outHeader) } diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index 03567d7be..fe14b2fb9 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -124,11 +124,6 @@ type Address struct { Metadata interface{} } -// BuildOption is a type alias of BuildOptions for legacy reasons. -// -// Deprecated: use BuildOptions instead. -type BuildOption = BuildOptions - // BuildOptions includes additional information for the builder to create // the resolver. type BuildOptions struct { @@ -235,11 +230,6 @@ type Builder interface { Scheme() string } -// ResolveNowOption is a type alias of ResolveNowOptions for legacy reasons. -// -// Deprecated: use ResolveNowOptions instead. -type ResolveNowOption = ResolveNowOptions - // ResolveNowOptions includes additional information for ResolveNow. 
type ResolveNowOptions struct{} diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go index 89ba9fa3e..edfda866c 100644 --- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go @@ -26,7 +26,6 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/resolver" @@ -34,7 +33,7 @@ import ( ) // ccResolverWrapper is a wrapper on top of cc for resolvers. -// It implements resolver.ClientConnection interface. +// It implements resolver.ClientConn interface. type ccResolverWrapper struct { cc *ClientConn resolverMu sync.Mutex @@ -46,43 +45,9 @@ type ccResolverWrapper struct { polling chan struct{} } -// split2 returns the values from strings.SplitN(s, sep, 2). -// If sep is not found, it returns ("", "", false) instead. -func split2(s, sep string) (string, string, bool) { - spl := strings.SplitN(s, sep, 2) - if len(spl) < 2 { - return "", "", false - } - return spl[0], spl[1], true -} - -// parseTarget splits target into a struct containing scheme, authority and -// endpoint. -// -// If target is not a valid scheme://authority/endpoint, it returns {Endpoint: -// target}. -func parseTarget(target string) (ret resolver.Target) { - var ok bool - ret.Scheme, ret.Endpoint, ok = split2(target, "://") - if !ok { - return resolver.Target{Endpoint: target} - } - ret.Authority, ret.Endpoint, ok = split2(ret.Endpoint, "/") - if !ok { - return resolver.Target{Endpoint: target} - } - return ret -} - -// newCCResolverWrapper uses the resolver.Builder stored in the ClientConn to -// build a Resolver and returns a ccResolverWrapper object which wraps the -// newly built resolver. 
-func newCCResolverWrapper(cc *ClientConn) (*ccResolverWrapper, error) { - rb := cc.dopts.resolverBuilder - if rb == nil { - return nil, fmt.Errorf("could not get resolver for scheme: %q", cc.parsedTarget.Scheme) - } - +// newCCResolverWrapper uses the resolver.Builder to build a Resolver and +// returns a ccResolverWrapper object which wraps the newly built resolver. +func newCCResolverWrapper(cc *ClientConn, rb resolver.Builder) (*ccResolverWrapper, error) { ccr := &ccResolverWrapper{ cc: cc, done: grpcsync.NewEvent(), @@ -175,7 +140,7 @@ func (ccr *ccResolverWrapper) UpdateState(s resolver.State) { if ccr.done.HasFired() { return } - grpclog.Infof("ccResolverWrapper: sending update to cc: %v", s) + channelz.Infof(ccr.cc.channelzID, "ccResolverWrapper: sending update to cc: %v", s) if channelz.IsOn() { ccr.addChannelzTraceEvent(s) } @@ -187,13 +152,7 @@ func (ccr *ccResolverWrapper) ReportError(err error) { if ccr.done.HasFired() { return } - grpclog.Warningf("ccResolverWrapper: reporting error to cc: %v", err) - if channelz.IsOn() { - channelz.AddTraceEvent(ccr.cc.channelzID, &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Resolver reported error: %v", err), - Severity: channelz.CtWarning, - }) - } + channelz.Warningf(ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) ccr.poll(ccr.cc.updateResolverState(resolver.State{}, err)) } @@ -202,7 +161,7 @@ func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { if ccr.done.HasFired() { return } - grpclog.Infof("ccResolverWrapper: sending new addresses to cc: %v", addrs) + channelz.Infof(ccr.cc.channelzID, "ccResolverWrapper: sending new addresses to cc: %v", addrs) if channelz.IsOn() { ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) } @@ -216,20 +175,14 @@ func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { if ccr.done.HasFired() { return } - grpclog.Infof("ccResolverWrapper: got new service config: %v", sc) + 
channelz.Infof(ccr.cc.channelzID, "ccResolverWrapper: got new service config: %v", sc) if ccr.cc.dopts.disableServiceConfig { - grpclog.Infof("Service config lookups disabled; ignoring config") + channelz.Info(ccr.cc.channelzID, "Service config lookups disabled; ignoring config") return } scpr := parseServiceConfig(sc) if scpr.Err != nil { - grpclog.Warningf("ccResolverWrapper: error parsing service config: %v", scpr.Err) - if channelz.IsOn() { - channelz.AddTraceEvent(ccr.cc.channelzID, &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Error parsing service config: %v", scpr.Err), - Severity: channelz.CtWarning, - }) - } + channelz.Warningf(ccr.cc.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) ccr.poll(balancer.ErrBadResolverState) return } @@ -262,7 +215,7 @@ func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { updates = append(updates, "resolver returned new addresses") } - channelz.AddTraceEvent(ccr.cc.channelzID, &channelz.TraceEventDesc{ + channelz.AddTraceEvent(ccr.cc.channelzID, 0, &channelz.TraceEventDesc{ Desc: fmt.Sprintf("Resolver state updated: %+v (%v)", s, strings.Join(updates, "; ")), Severity: channelz.CtINFO, }) diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index edaba7957..cf9dbe7fd 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -287,13 +287,14 @@ func (o FailFastCallOption) before(c *callInfo) error { } func (o FailFastCallOption) after(c *callInfo) {} -// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size the client can receive. -func MaxCallRecvMsgSize(s int) CallOption { - return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: s} +// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size +// in bytes the client can receive. 
+func MaxCallRecvMsgSize(bytes int) CallOption { + return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: bytes} } // MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message -// size the client can receive. +// size in bytes the client can receive. // This is an EXPERIMENTAL API. type MaxRecvMsgSizeCallOption struct { MaxRecvMsgSize int @@ -305,13 +306,14 @@ func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error { } func (o MaxRecvMsgSizeCallOption) after(c *callInfo) {} -// MaxCallSendMsgSize returns a CallOption which sets the maximum message size the client can send. -func MaxCallSendMsgSize(s int) CallOption { - return MaxSendMsgSizeCallOption{MaxSendMsgSize: s} +// MaxCallSendMsgSize returns a CallOption which sets the maximum message size +// in bytes the client can send. +func MaxCallSendMsgSize(bytes int) CallOption { + return MaxSendMsgSizeCallOption{MaxSendMsgSize: bytes} } // MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message -// size the client can send. +// size in bytes the client can send. // This is an EXPERIMENTAL API. type MaxSendMsgSizeCallOption struct { MaxSendMsgSize int @@ -871,7 +873,7 @@ type channelzData struct { // The SupportPackageIsVersion variables are referenced from generated protocol // buffer files to ensure compatibility with the gRPC version used. The latest -// support package version is 5. +// support package version is 6. // // Older versions are kept for compatibility. They may be removed if // compatibility cannot be maintained. 
@@ -881,6 +883,7 @@ const ( SupportPackageIsVersion3 = true SupportPackageIsVersion4 = true SupportPackageIsVersion5 = true + SupportPackageIsVersion6 = true ) const grpcUA = "grpc-go/" + Version diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 0d75cb109..edfcdcaee 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -116,6 +116,8 @@ type serverOptions struct { dc Decompressor unaryInt UnaryServerInterceptor streamInt StreamServerInterceptor + chainUnaryInts []UnaryServerInterceptor + chainStreamInts []StreamServerInterceptor inTapHandle tap.ServerInHandle statsHandler stats.Handler maxConcurrentStreams uint32 @@ -311,6 +313,16 @@ func UnaryInterceptor(i UnaryServerInterceptor) ServerOption { }) } +// ChainUnaryInterceptor returns a ServerOption that specifies the chained interceptor +// for unary RPCs. The first interceptor will be the outer most, +// while the last interceptor will be the inner most wrapper around the real call. +// All unary interceptors added by this method will be chained. +func ChainUnaryInterceptor(interceptors ...UnaryServerInterceptor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.chainUnaryInts = append(o.chainUnaryInts, interceptors...) + }) +} + // StreamInterceptor returns a ServerOption that sets the StreamServerInterceptor for the // server. Only one stream interceptor can be installed. func StreamInterceptor(i StreamServerInterceptor) ServerOption { @@ -322,6 +334,16 @@ func StreamInterceptor(i StreamServerInterceptor) ServerOption { }) } +// ChainStreamInterceptor returns a ServerOption that specifies the chained interceptor +// for stream RPCs. The first interceptor will be the outer most, +// while the last interceptor will be the inner most wrapper around the real call. +// All stream interceptors added by this method will be chained. 
+func ChainStreamInterceptor(interceptors ...StreamServerInterceptor) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.chainStreamInts = append(o.chainStreamInts, interceptors...) + }) +} + // InTapHandle returns a ServerOption that sets the tap handle for all the server // transport to be created. Only one can be installed. func InTapHandle(h tap.ServerInHandle) ServerOption { @@ -404,6 +426,8 @@ func NewServer(opt ...ServerOption) *Server { done: grpcsync.NewEvent(), czData: new(channelzData), } + chainUnaryServerInterceptors(s) + chainStreamServerInterceptors(s) s.cv = sync.NewCond(&s.mu) if EnableTracing { _, file, line, _ := runtime.Caller(1) @@ -658,7 +682,7 @@ func (s *Server) handleRawConn(rawConn net.Conn) { s.mu.Lock() s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) s.mu.Unlock() - grpclog.Warningf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) + channelz.Warningf(s.channelzID, "grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) rawConn.Close() } rawConn.SetDeadline(time.Time{}) @@ -705,7 +729,7 @@ func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) tr s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err) s.mu.Unlock() c.Close() - grpclog.Warningln("grpc: Server.Serve failed to create ServerTransport: ", err) + channelz.Warning(s.channelzID, "grpc: Server.Serve failed to create ServerTransport: ", err) return nil } @@ -844,12 +868,12 @@ func (s *Server) incrCallsFailed() { func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { data, err := encode(s.getCodec(stream.ContentSubtype()), msg) if err != nil { - grpclog.Errorln("grpc: server failed to encode response: ", err) + channelz.Error(s.channelzID, "grpc: server failed to encode response: ", err) return 
err } compData, err := compress(data, cp, comp) if err != nil { - grpclog.Errorln("grpc: server failed to compress response: ", err) + channelz.Error(s.channelzID, "grpc: server failed to compress response: ", err) return err } hdr, payload := msgHeader(data, compData) @@ -864,6 +888,40 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str return err } +// chainUnaryServerInterceptors chains all unary server interceptors into one. +func chainUnaryServerInterceptors(s *Server) { + // Prepend opts.unaryInt to the chaining interceptors if it exists, since unaryInt will + // be executed before any other chained interceptors. + interceptors := s.opts.chainUnaryInts + if s.opts.unaryInt != nil { + interceptors = append([]UnaryServerInterceptor{s.opts.unaryInt}, s.opts.chainUnaryInts...) + } + + var chainedInt UnaryServerInterceptor + if len(interceptors) == 0 { + chainedInt = nil + } else if len(interceptors) == 1 { + chainedInt = interceptors[0] + } else { + chainedInt = func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) { + return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler)) + } + } + + s.opts.unaryInt = chainedInt +} + +// getChainUnaryHandler recursively generate the chained UnaryHandler +func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info *UnaryServerInfo, finalHandler UnaryHandler) UnaryHandler { + if curr == len(interceptors)-1 { + return finalHandler + } + + return func(ctx context.Context, req interface{}) (interface{}, error) { + return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler)) + } +} + func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc, trInfo *traceInfo) (err error) { sh := s.opts.statsHandler if sh != nil || trInfo != nil || channelz.IsOn() { @@ -989,7 +1047,7 @@ func (s *Server) 
processUnaryRPC(t transport.ServerTransport, stream *transport. if err != nil { if st, ok := status.FromError(err); ok { if e := t.WriteStatus(stream, st); e != nil { - grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e) + channelz.Warningf(s.channelzID, "grpc: Server.processUnaryRPC failed to write status %v", e) } } return err @@ -1034,7 +1092,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. trInfo.tr.SetError() } if e := t.WriteStatus(stream, appStatus); e != nil { - grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status: %v", e) + channelz.Warningf(s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) } if binlog != nil { if h, _ := stream.Header(); h.Len() > 0 { @@ -1061,9 +1119,9 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. // The entire stream is done (for unary RPC only). return err } - if s, ok := status.FromError(err); ok { - if e := t.WriteStatus(stream, s); e != nil { - grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status: %v", e) + if sts, ok := status.FromError(err); ok { + if e := t.WriteStatus(stream, sts); e != nil { + channelz.Warningf(s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) } } else { switch st := err.(type) { @@ -1113,6 +1171,40 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. return err } +// chainStreamServerInterceptors chains all stream server interceptors into one. +func chainStreamServerInterceptors(s *Server) { + // Prepend opts.streamInt to the chaining interceptors if it exists, since streamInt will + // be executed before any other chained interceptors. + interceptors := s.opts.chainStreamInts + if s.opts.streamInt != nil { + interceptors = append([]StreamServerInterceptor{s.opts.streamInt}, s.opts.chainStreamInts...) 
+ } + + var chainedInt StreamServerInterceptor + if len(interceptors) == 0 { + chainedInt = nil + } else if len(interceptors) == 1 { + chainedInt = interceptors[0] + } else { + chainedInt = func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { + return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler)) + } + } + + s.opts.streamInt = chainedInt +} + +// getChainStreamHandler recursively generate the chained StreamHandler +func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, info *StreamServerInfo, finalHandler StreamHandler) StreamHandler { + if curr == len(interceptors)-1 { + return finalHandler + } + + return func(srv interface{}, ss ServerStream) error { + return interceptors[curr+1](srv, ss, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler)) + } +} + func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) { if channelz.IsOn() { s.incrCallsStarted() @@ -1297,7 +1389,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) trInfo.tr.SetError() } - grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err) + channelz.Warningf(s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) } if trInfo != nil { trInfo.tr.Finish() @@ -1338,7 +1430,7 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) trInfo.tr.SetError() } - grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err) + channelz.Warningf(s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) } if trInfo != nil { trInfo.tr.Finish() diff --git a/vendor/google.golang.org/grpc/service_config.go 
b/vendor/google.golang.org/grpc/service_config.go index 4f8836d48..5a80a575a 100644 --- a/vendor/google.golang.org/grpc/service_config.go +++ b/vendor/google.golang.org/grpc/service_config.go @@ -136,9 +136,9 @@ type retryPolicy struct { maxAttempts int // Exponential backoff parameters. The initial retry attempt will occur at - // random(0, initialBackoffMS). In general, the nth attempt will occur at + // random(0, initialBackoff). In general, the nth attempt will occur at // random(0, - // min(initialBackoffMS*backoffMultiplier**(n-1), maxBackoffMS)). + // min(initialBackoff*backoffMultiplier**(n-1), maxBackoff)). // // These fields are required and must be greater than zero. initialBackoff time.Duration diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index bb99940e3..934ef6832 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -31,7 +31,6 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" "google.golang.org/grpc/encoding" - "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal/balancerload" "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/channelz" @@ -498,13 +497,13 @@ func (cs *clientStream) shouldRetry(err error) error { if len(sps) == 1 { var e error if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 { - grpclog.Infof("Server retry pushback specified to abort (%q).", sps[0]) + channelz.Infof(cs.cc.channelzID, "Server retry pushback specified to abort (%q).", sps[0]) cs.retryThrottler.throttle() // This counts as a failure for throttling. return err } hasPushback = true } else if len(sps) > 1 { - grpclog.Warningf("Server retry pushback specified multiple values (%q); not retrying.", sps) + channelz.Warningf(cs.cc.channelzID, "Server retry pushback specified multiple values (%q); not retrying.", sps) cs.retryThrottler.throttle() // This counts as a failure for throttling. 
return err } diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go index 0a57b9994..07a2d26b3 100644 --- a/vendor/google.golang.org/grpc/trace.go +++ b/vendor/google.golang.org/grpc/trace.go @@ -41,9 +41,6 @@ func methodFamily(m string) string { if i := strings.Index(m, "/"); i >= 0 { m = m[:i] // remove everything from second slash } - if i := strings.LastIndex(m, "."); i >= 0 { - m = m[i+1:] // cut down to last dotted component - } return m } diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 1d3b043ec..66d09b4ab 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.26.0" +const Version = "1.28.1" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh index 798921acc..f077fe775 100644 --- a/vendor/google.golang.org/grpc/vet.sh +++ b/vendor/google.golang.org/grpc/vet.sh @@ -81,7 +81,7 @@ fi # - Do not import math/rand for real library code. Use internal/grpcrand for # thread safety. -git grep -l '"math/rand"' -- "*.go" 2>&1 | (! grep -v '^examples\|^stress\|grpcrand\|wrr_test') +git grep -l '"math/rand"' -- "*.go" 2>&1 | (! grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test') # - Ensure all ptypes proto packages are renamed when importing. (! git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go") @@ -94,7 +94,7 @@ go list -f {{.Dir}} ./... | xargs go run test/go_vet/vet.go gofmt -s -d -l . 2>&1 | fail_on_output goimports -l . 2>&1 | (! grep -vE "(_mock|\.pb)\.go") golint ./... 2>&1 | (! grep -vE "(_mock|\.pb)\.go:") -go vet -all . +go vet -all ./... misspell -error . 
@@ -151,6 +151,7 @@ grpc.WithMaxMsgSize grpc.WithServiceConfig grpc.WithTimeout http.CloseNotifier +info.SecurityVersion naming.Resolver naming.Update naming.Watcher diff --git a/vendor/k8s.io/kube-scheduler/LICENSE b/vendor/k8s.io/kube-scheduler/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/k8s.io/kube-scheduler/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/k8s.io/kube-scheduler/config/v1/doc.go b/vendor/k8s.io/kube-scheduler/config/v1/doc.go new file mode 100644 index 000000000..a94d31939 --- /dev/null +++ b/vendor/k8s.io/kube-scheduler/config/v1/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true +// +groupName=kubescheduler.config.k8s.io + +package v1 // import "k8s.io/kube-scheduler/config/v1" diff --git a/vendor/k8s.io/kube-scheduler/config/v1/register.go b/vendor/k8s.io/kube-scheduler/config/v1/register.go new file mode 100644 index 000000000..d5fcf8315 --- /dev/null +++ b/vendor/k8s.io/kube-scheduler/config/v1/register.go @@ -0,0 +1,45 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name used in this package +const GroupName = "kubescheduler.config.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +var ( + // SchemeBuilder is the scheme builder with scheme init functions to run for this API package + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme is a global function that registers this API group & version to a scheme + AddToScheme = SchemeBuilder.AddToScheme +) + +// addKnownTypes registers known types to the given scheme +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Policy{}, + ) + // also register into the v1 group for API backward compatibility + scheme.AddKnownTypes(schema.GroupVersion{Group: "", Version: "v1"}, &Policy{}) + return nil +} diff --git a/vendor/k8s.io/kube-scheduler/config/v1/types.go b/vendor/k8s.io/kube-scheduler/config/v1/types.go new file mode 100644 index 000000000..a7dbdbb97 --- /dev/null +++ b/vendor/k8s.io/kube-scheduler/config/v1/types.go @@ -0,0 +1,242 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + gojson "encoding/json" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Policy describes a struct for a policy resource used in api. +type Policy struct { + metav1.TypeMeta `json:",inline"` + // Holds the information to configure the fit predicate functions + Predicates []PredicatePolicy `json:"predicates"` + // Holds the information to configure the priority functions + Priorities []PriorityPolicy `json:"priorities"` + // Holds the information to communicate with the extender(s) + Extenders []Extender `json:"extenders"` + // RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule + // corresponding to every RequiredDuringScheduling affinity rule. + // HardPodAffinitySymmetricWeight represents the weight of implicit PreferredDuringScheduling affinity rule, in the range 1-100. + HardPodAffinitySymmetricWeight int32 `json:"hardPodAffinitySymmetricWeight"` + + // When AlwaysCheckAllPredicates is set to true, scheduler checks all + // the configured predicates even after one or more of them fails. + // When the flag is set to false, scheduler skips checking the rest + // of the predicates after it finds one predicate that failed. + AlwaysCheckAllPredicates bool `json:"alwaysCheckAllPredicates"` +} + +// PredicatePolicy describes a struct of a predicate policy. 
+type PredicatePolicy struct { + // Identifier of the predicate policy + // For a custom predicate, the name can be user-defined + // For the Kubernetes provided predicates, the name is the identifier of the pre-defined predicate + Name string `json:"name"` + // Holds the parameters to configure the given predicate + Argument *PredicateArgument `json:"argument"` +} + +// PriorityPolicy describes a struct of a priority policy. +type PriorityPolicy struct { + // Identifier of the priority policy + // For a custom priority, the name can be user-defined + // For the Kubernetes provided priority functions, the name is the identifier of the pre-defined priority function + Name string `json:"name"` + // The numeric multiplier for the node scores that the priority function generates + // The weight should be non-zero and can be a positive or a negative integer + Weight int64 `json:"weight"` + // Holds the parameters to configure the given priority function + Argument *PriorityArgument `json:"argument"` +} + +// PredicateArgument represents the arguments to configure predicate functions in scheduler policy configuration. +// Only one of its members may be specified +type PredicateArgument struct { + // The predicate that provides affinity for pods belonging to a service + // It uses a label to identify nodes that belong to the same "group" + ServiceAffinity *ServiceAffinity `json:"serviceAffinity"` + // The predicate that checks whether a particular node has a certain label + // defined or not, regardless of value + LabelsPresence *LabelsPresence `json:"labelsPresence"` +} + +// PriorityArgument represents the arguments to configure priority functions in scheduler policy configuration. 
+// Only one of its members may be specified +type PriorityArgument struct { + // The priority function that ensures a good spread (anti-affinity) for pods belonging to a service + // It uses a label to identify nodes that belong to the same "group" + ServiceAntiAffinity *ServiceAntiAffinity `json:"serviceAntiAffinity"` + // The priority function that checks whether a particular node has a certain label + // defined or not, regardless of value + LabelPreference *LabelPreference `json:"labelPreference"` + // The RequestedToCapacityRatio priority function is parametrized with function shape. + RequestedToCapacityRatioArguments *RequestedToCapacityRatioArguments `json:"requestedToCapacityRatioArguments"` +} + +// ServiceAffinity holds the parameters that are used to configure the corresponding predicate in scheduler policy configuration. +type ServiceAffinity struct { + // The list of labels that identify node "groups" + // All of the labels should match for the node to be considered a fit for hosting the pod + Labels []string `json:"labels"` +} + +// LabelsPresence holds the parameters that are used to configure the corresponding predicate in scheduler policy configuration. 
+type LabelsPresence struct { + // The list of labels that identify node "groups" + // All of the labels should be either present (or absent) for the node to be considered a fit for hosting the pod + Labels []string `json:"labels"` + // The boolean flag that indicates whether the labels should be present or absent from the node + Presence bool `json:"presence"` +} + +// ServiceAntiAffinity holds the parameters that are used to configure the corresponding priority function +type ServiceAntiAffinity struct { + // Used to identify node "groups" + Label string `json:"label"` +} + +// LabelPreference holds the parameters that are used to configure the corresponding priority function +type LabelPreference struct { + // Used to identify node "groups" + Label string `json:"label"` + // This is a boolean flag + // If true, higher priority is given to nodes that have the label + // If false, higher priority is given to nodes that do not have the label + Presence bool `json:"presence"` +} + +// RequestedToCapacityRatioArguments holds arguments specific to RequestedToCapacityRatio priority function. +type RequestedToCapacityRatioArguments struct { + // Array of point defining priority function shape. + Shape []UtilizationShapePoint `json:"shape"` + Resources []ResourceSpec `json:"resources,omitempty"` +} + +// UtilizationShapePoint represents single point of priority function shape. +type UtilizationShapePoint struct { + // Utilization (x axis). Valid values are 0 to 100. Fully utilized node maps to 100. + Utilization int32 `json:"utilization"` + // Score assigned to given utilization (y axis). Valid values are 0 to 10. + Score int32 `json:"score"` +} + +// ResourceSpec represents single resource and weight for bin packing of priority RequestedToCapacityRatioArguments. +type ResourceSpec struct { + // Name of the resource to be managed by RequestedToCapacityRatio function. + Name string `json:"name"` + // Weight of the resource. 
+ Weight int64 `json:"weight,omitempty"` +} + +// ExtenderManagedResource describes the arguments of extended resources +// managed by an extender. +type ExtenderManagedResource struct { + // Name is the extended resource name. + Name string `json:"name"` + // IgnoredByScheduler indicates whether kube-scheduler should ignore this + // resource when applying predicates. + IgnoredByScheduler bool `json:"ignoredByScheduler,omitempty"` +} + +// ExtenderTLSConfig contains settings to enable TLS with extender +type ExtenderTLSConfig struct { + // Server should be accessed without verifying the TLS certificate. For testing only. + Insecure bool `json:"insecure,omitempty"` + // ServerName is passed to the server for SNI and is used in the client to check server + // certificates against. If ServerName is empty, the hostname used to contact the + // server is used. + ServerName string `json:"serverName,omitempty"` + + // Server requires TLS client certificate authentication + CertFile string `json:"certFile,omitempty"` + // Server requires TLS client certificate authentication + KeyFile string `json:"keyFile,omitempty"` + // Trusted root certificates for server + CAFile string `json:"caFile,omitempty"` + + // CertData holds PEM-encoded bytes (typically read from a client certificate file). + // CertData takes precedence over CertFile + CertData []byte `json:"certData,omitempty"` + // KeyData holds PEM-encoded bytes (typically read from a client certificate key file). + // KeyData takes precedence over KeyFile + KeyData []byte `json:"keyData,omitempty"` + // CAData holds PEM-encoded bytes (typically read from a root certificates bundle). + // CAData takes precedence over CAFile + CAData []byte `json:"caData,omitempty"` +} + +// Extender holds the parameters used to communicate with the extender. If a verb is unspecified/empty, +// it is assumed that the extender chose not to provide that extension. 
+type Extender struct { + // URLPrefix at which the extender is available + URLPrefix string `json:"urlPrefix"` + // Verb for the filter call, empty if not supported. This verb is appended to the URLPrefix when issuing the filter call to extender. + FilterVerb string `json:"filterVerb,omitempty"` + // Verb for the preempt call, empty if not supported. This verb is appended to the URLPrefix when issuing the preempt call to extender. + PreemptVerb string `json:"preemptVerb,omitempty"` + // Verb for the prioritize call, empty if not supported. This verb is appended to the URLPrefix when issuing the prioritize call to extender. + PrioritizeVerb string `json:"prioritizeVerb,omitempty"` + // The numeric multiplier for the node scores that the prioritize call generates. + // The weight should be a positive integer + Weight int64 `json:"weight,omitempty"` + // Verb for the bind call, empty if not supported. This verb is appended to the URLPrefix when issuing the bind call to extender. + // If this method is implemented by the extender, it is the extender's responsibility to bind the pod to apiserver. Only one extender + // can implement this function. + BindVerb string `json:"bindVerb,omitempty"` + // EnableHTTPS specifies whether https should be used to communicate with the extender + EnableHTTPS bool `json:"enableHttps,omitempty"` + // TLSConfig specifies the transport layer security config + TLSConfig *ExtenderTLSConfig `json:"tlsConfig,omitempty"` + // HTTPTimeout specifies the timeout duration for a call to the extender. Filter timeout fails the scheduling of the pod. Prioritize + // timeout is ignored, k8s/other extenders priorities are used to select the node. 
+ HTTPTimeout time.Duration `json:"httpTimeout,omitempty"` + // NodeCacheCapable specifies that the extender is capable of caching node information, + // so the scheduler should only send minimal information about the eligible nodes + // assuming that the extender already cached full details of all nodes in the cluster + NodeCacheCapable bool `json:"nodeCacheCapable,omitempty"` + // ManagedResources is a list of extended resources that are managed by + // this extender. + // - A pod will be sent to the extender on the Filter, Prioritize and Bind + // (if the extender is the binder) phases iff the pod requests at least + // one of the extended resources in this list. If empty or unspecified, + // all pods will be sent to this extender. + // - If IgnoredByScheduler is set to true for a resource, kube-scheduler + // will skip checking the resource in predicates. + // +optional + ManagedResources []ExtenderManagedResource `json:"managedResources,omitempty"` + // Ignorable specifies if the extender is ignorable, i.e. scheduling should not + // fail when the extender returns an error or is not reachable. + Ignorable bool `json:"ignorable,omitempty"` +} + +// caseInsensitiveExtender is a type alias which lets us use the stdlib case-insensitive decoding +// to preserve compatibility with incorrectly specified scheduler config fields: +// * BindVerb, which originally did not specify a json tag, and required upper-case serialization in 1.7 +// * TLSConfig, which uses a struct not intended for serialization, and does not include any json tags +type caseInsensitiveExtender *Extender + +// UnmarshalJSON implements the json.Unmarshaller interface. +// This preserves compatibility with incorrect case-insensitive configuration fields. 
+func (t *Extender) UnmarshalJSON(b []byte) error { + return gojson.Unmarshal(b, caseInsensitiveExtender(t)) +} diff --git a/vendor/k8s.io/kube-scheduler/config/v1/zz_generated.deepcopy.go b/vendor/k8s.io/kube-scheduler/config/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..211db388b --- /dev/null +++ b/vendor/k8s.io/kube-scheduler/config/v1/zz_generated.deepcopy.go @@ -0,0 +1,375 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Extender) DeepCopyInto(out *Extender) { + *out = *in + if in.TLSConfig != nil { + in, out := &in.TLSConfig, &out.TLSConfig + *out = new(ExtenderTLSConfig) + (*in).DeepCopyInto(*out) + } + if in.ManagedResources != nil { + in, out := &in.ManagedResources, &out.ManagedResources + *out = make([]ExtenderManagedResource, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Extender. +func (in *Extender) DeepCopy() *Extender { + if in == nil { + return nil + } + out := new(Extender) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExtenderManagedResource) DeepCopyInto(out *ExtenderManagedResource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderManagedResource. +func (in *ExtenderManagedResource) DeepCopy() *ExtenderManagedResource { + if in == nil { + return nil + } + out := new(ExtenderManagedResource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExtenderTLSConfig) DeepCopyInto(out *ExtenderTLSConfig) { + *out = *in + if in.CertData != nil { + in, out := &in.CertData, &out.CertData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.KeyData != nil { + in, out := &in.KeyData, &out.KeyData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.CAData != nil { + in, out := &in.CAData, &out.CAData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderTLSConfig. +func (in *ExtenderTLSConfig) DeepCopy() *ExtenderTLSConfig { + if in == nil { + return nil + } + out := new(ExtenderTLSConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LabelPreference) DeepCopyInto(out *LabelPreference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabelPreference. +func (in *LabelPreference) DeepCopy() *LabelPreference { + if in == nil { + return nil + } + out := new(LabelPreference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LabelsPresence) DeepCopyInto(out *LabelsPresence) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabelsPresence. +func (in *LabelsPresence) DeepCopy() *LabelsPresence { + if in == nil { + return nil + } + out := new(LabelsPresence) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Policy) DeepCopyInto(out *Policy) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Predicates != nil { + in, out := &in.Predicates, &out.Predicates + *out = make([]PredicatePolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Priorities != nil { + in, out := &in.Priorities, &out.Priorities + *out = make([]PriorityPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Extenders != nil { + in, out := &in.Extenders, &out.Extenders + *out = make([]Extender, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Policy. +func (in *Policy) DeepCopy() *Policy { + if in == nil { + return nil + } + out := new(Policy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Policy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PredicateArgument) DeepCopyInto(out *PredicateArgument) { + *out = *in + if in.ServiceAffinity != nil { + in, out := &in.ServiceAffinity, &out.ServiceAffinity + *out = new(ServiceAffinity) + (*in).DeepCopyInto(*out) + } + if in.LabelsPresence != nil { + in, out := &in.LabelsPresence, &out.LabelsPresence + *out = new(LabelsPresence) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PredicateArgument. +func (in *PredicateArgument) DeepCopy() *PredicateArgument { + if in == nil { + return nil + } + out := new(PredicateArgument) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PredicatePolicy) DeepCopyInto(out *PredicatePolicy) { + *out = *in + if in.Argument != nil { + in, out := &in.Argument, &out.Argument + *out = new(PredicateArgument) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PredicatePolicy. +func (in *PredicatePolicy) DeepCopy() *PredicatePolicy { + if in == nil { + return nil + } + out := new(PredicatePolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PriorityArgument) DeepCopyInto(out *PriorityArgument) { + *out = *in + if in.ServiceAntiAffinity != nil { + in, out := &in.ServiceAntiAffinity, &out.ServiceAntiAffinity + *out = new(ServiceAntiAffinity) + **out = **in + } + if in.LabelPreference != nil { + in, out := &in.LabelPreference, &out.LabelPreference + *out = new(LabelPreference) + **out = **in + } + if in.RequestedToCapacityRatioArguments != nil { + in, out := &in.RequestedToCapacityRatioArguments, &out.RequestedToCapacityRatioArguments + *out = new(RequestedToCapacityRatioArguments) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityArgument. +func (in *PriorityArgument) DeepCopy() *PriorityArgument { + if in == nil { + return nil + } + out := new(PriorityArgument) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityPolicy) DeepCopyInto(out *PriorityPolicy) { + *out = *in + if in.Argument != nil { + in, out := &in.Argument, &out.Argument + *out = new(PriorityArgument) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityPolicy. +func (in *PriorityPolicy) DeepCopy() *PriorityPolicy { + if in == nil { + return nil + } + out := new(PriorityPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RequestedToCapacityRatioArguments) DeepCopyInto(out *RequestedToCapacityRatioArguments) { + *out = *in + if in.Shape != nil { + in, out := &in.Shape, &out.Shape + *out = make([]UtilizationShapePoint, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ResourceSpec, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestedToCapacityRatioArguments. +func (in *RequestedToCapacityRatioArguments) DeepCopy() *RequestedToCapacityRatioArguments { + if in == nil { + return nil + } + out := new(RequestedToCapacityRatioArguments) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceSpec) DeepCopyInto(out *ResourceSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSpec. +func (in *ResourceSpec) DeepCopy() *ResourceSpec { + if in == nil { + return nil + } + out := new(ResourceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAffinity) DeepCopyInto(out *ServiceAffinity) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAffinity. +func (in *ServiceAffinity) DeepCopy() *ServiceAffinity { + if in == nil { + return nil + } + out := new(ServiceAffinity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceAntiAffinity) DeepCopyInto(out *ServiceAntiAffinity) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAntiAffinity. +func (in *ServiceAntiAffinity) DeepCopy() *ServiceAntiAffinity { + if in == nil { + return nil + } + out := new(ServiceAntiAffinity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UtilizationShapePoint) DeepCopyInto(out *UtilizationShapePoint) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UtilizationShapePoint. +func (in *UtilizationShapePoint) DeepCopy() *UtilizationShapePoint { + if in == nil { + return nil + } + out := new(UtilizationShapePoint) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/modules.txt b/vendor/modules.txt index f3c77993b..800c2c5ad 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -61,7 +61,7 @@ github.com/gardener/etcd-druid/api/v1alpha1 # github.com/gardener/external-dns-management v0.7.18 github.com/gardener/external-dns-management/pkg/apis/dns github.com/gardener/external-dns-management/pkg/apis/dns/v1alpha1 -# github.com/gardener/gardener v1.15.0 +# github.com/gardener/gardener v1.15.1-0.20210112065447-570ae178874b ## explicit github.com/gardener/gardener/.github github.com/gardener/gardener/.github/ISSUE_TEMPLATE @@ -96,6 +96,7 @@ github.com/gardener/gardener/extensions/pkg/util/index github.com/gardener/gardener/extensions/pkg/util/secret github.com/gardener/gardener/extensions/pkg/webhook github.com/gardener/gardener/extensions/pkg/webhook/cmd +github.com/gardener/gardener/extensions/pkg/webhook/context github.com/gardener/gardener/extensions/pkg/webhook/controlplane github.com/gardener/gardener/extensions/pkg/webhook/controlplane/genericmutator github.com/gardener/gardener/extensions/pkg/webhook/controlplane/test @@ 
-167,8 +168,10 @@ github.com/gardener/gardener/pkg/operation/botanist/controlplane/kubescheduler github.com/gardener/gardener/pkg/operation/botanist/extensions/containerruntime github.com/gardener/gardener/pkg/operation/botanist/extensions/controlplane github.com/gardener/gardener/pkg/operation/botanist/extensions/dns +github.com/gardener/gardener/pkg/operation/botanist/extensions/extension github.com/gardener/gardener/pkg/operation/botanist/extensions/infrastructure github.com/gardener/gardener/pkg/operation/botanist/extensions/network +github.com/gardener/gardener/pkg/operation/botanist/extensions/worker github.com/gardener/gardener/pkg/operation/botanist/matchers github.com/gardener/gardener/pkg/operation/botanist/seedsystemcomponents/seedadmission github.com/gardener/gardener/pkg/operation/botanist/systemcomponents/metricsserver @@ -178,6 +181,11 @@ github.com/gardener/gardener/pkg/operation/etcdencryption github.com/gardener/gardener/pkg/operation/garden github.com/gardener/gardener/pkg/operation/seed github.com/gardener/gardener/pkg/operation/seed/istio +github.com/gardener/gardener/pkg/operation/seed/scheduler +github.com/gardener/gardener/pkg/operation/seed/scheduler/configurator +github.com/gardener/gardener/pkg/operation/seed/scheduler/v18 +github.com/gardener/gardener/pkg/operation/seed/scheduler/v19 +github.com/gardener/gardener/pkg/operation/seed/scheduler/v20 github.com/gardener/gardener/pkg/operation/shoot github.com/gardener/gardener/pkg/operation/shootsecrets github.com/gardener/gardener/pkg/scheduler @@ -185,6 +193,7 @@ github.com/gardener/gardener/pkg/scheduler/apis/config github.com/gardener/gardener/pkg/scheduler/apis/config/v1alpha1 github.com/gardener/gardener/pkg/scheduler/controller/common github.com/gardener/gardener/pkg/scheduler/controller/shoot +github.com/gardener/gardener/pkg/seedadmission github.com/gardener/gardener/pkg/utils github.com/gardener/gardener/pkg/utils/chart github.com/gardener/gardener/pkg/utils/context @@ -206,6 +215,9 
@@ github.com/gardener/gardener/pkg/version github.com/gardener/gardener/test/framework github.com/gardener/gardener/test/framework/config github.com/gardener/gardener/test/framework/reporter +github.com/gardener/gardener/third_party/kube-scheduler/v18/v1alpha2 +github.com/gardener/gardener/third_party/kube-scheduler/v19/v1beta1 +github.com/gardener/gardener/third_party/kube-scheduler/v20/v1beta1 # github.com/gardener/gardener-resource-manager v0.18.0 github.com/gardener/gardener-resource-manager/pkg/apis/resources github.com/gardener/gardener-resource-manager/pkg/apis/resources/v1alpha1 @@ -542,7 +554,6 @@ go.uber.org/zap/internal/color go.uber.org/zap/internal/exit go.uber.org/zap/zapcore # golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975 -golang.org/x/crypto/bcrypt golang.org/x/crypto/blowfish golang.org/x/crypto/cast5 golang.org/x/crypto/chacha20 @@ -681,7 +692,7 @@ google.golang.org/genproto/googleapis/iam/v1 google.golang.org/genproto/googleapis/rpc/code google.golang.org/genproto/googleapis/rpc/status google.golang.org/genproto/googleapis/type/expr -# google.golang.org/grpc v1.26.0 +# google.golang.org/grpc v1.28.1 google.golang.org/grpc google.golang.org/grpc/attributes google.golang.org/grpc/backoff @@ -703,8 +714,10 @@ google.golang.org/grpc/internal/binarylog google.golang.org/grpc/internal/buffer google.golang.org/grpc/internal/channelz google.golang.org/grpc/internal/envconfig +google.golang.org/grpc/internal/grpclog google.golang.org/grpc/internal/grpcrand google.golang.org/grpc/internal/grpcsync +google.golang.org/grpc/internal/grpcutil google.golang.org/grpc/internal/resolver/dns google.golang.org/grpc/internal/resolver/passthrough google.golang.org/grpc/internal/syscall @@ -1188,6 +1201,8 @@ k8s.io/kube-openapi/pkg/generators k8s.io/kube-openapi/pkg/generators/rules k8s.io/kube-openapi/pkg/util/proto k8s.io/kube-openapi/pkg/util/sets +# k8s.io/kube-scheduler v0.18.10 +k8s.io/kube-scheduler/config/v1 # k8s.io/kubelet v0.18.10 ## explicit 
k8s.io/kubelet/config/v1beta1 From 562e7de8748bcc62c996d78c503fdcc7601cd988 Mon Sep 17 00:00:00 2001 From: Vladimir Nachev Date: Tue, 12 Jan 2021 13:42:33 +0200 Subject: [PATCH 2/2] Inject ownerRef into infra secrets and configmaps --- Dockerfile | 4 ++-- pkg/internal/terraform.go | 5 ++++- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/Dockerfile b/Dockerfile index 601c2e509..2f0ff1c46 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,12 +1,12 @@ ############# builder -FROM eu.gcr.io/gardener-project/3rd/golang:1.15.3 AS builder +FROM eu.gcr.io/gardener-project/3rd/golang:1.15.5 AS builder WORKDIR /go/src/github.com/gardener/gardener-extension-provider-gcp COPY . . RUN make install ############# base image -FROM eu.gcr.io/gardener-project/3rd/alpine:3.12.1 AS base +FROM eu.gcr.io/gardener-project/3rd/alpine:3.12.3 AS base ############# gardener-extension-provider-gcp FROM base AS gardener-extension-provider-gcp diff --git a/pkg/internal/terraform.go b/pkg/internal/terraform.go index 10cffed12..ab2e9eab4 100644 --- a/pkg/internal/terraform.go +++ b/pkg/internal/terraform.go @@ -23,6 +23,7 @@ import ( extensionsv1alpha1 "github.com/gardener/gardener/pkg/apis/extensions/v1alpha1" "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/rest" "github.com/gardener/gardener-extension-provider-gcp/pkg/gcp" @@ -59,12 +60,14 @@ func NewTerraformer( return nil, err } + owner := metav1.NewControllerRef(infra, extensionsv1alpha1.SchemeGroupVersion.WithKind(extensionsv1alpha1.InfrastructureResource)) return tf. UseV2(true). SetLogLevel("debug"). SetTerminationGracePeriodSeconds(630). SetDeadlineCleaning(5 * time.Minute). - SetDeadlinePod(15 * time.Minute), nil + SetDeadlinePod(15 * time.Minute). + SetOwnerRef(owner), nil } // NewTerraformerWithAuth initializes a new Terraformer that has the ServiceAccount credentials.