From 024dd1f3a59947190e79b89650fc1be7feb260ca Mon Sep 17 00:00:00 2001 From: Pierangelo Di Pilato Date: Mon, 14 Oct 2024 16:05:32 +0200 Subject: [PATCH] Add KafkaSource v1 API (#4130) * Add KafkaSource v1 types Signed-off-by: Pierangelo Di Pilato * Add conversions functions Signed-off-by: Pierangelo Di Pilato * Update CRD resource Signed-off-by: Pierangelo Di Pilato * Add conversion webhook Signed-off-by: Pierangelo Di Pilato * Fix tests Signed-off-by: Pierangelo Di Pilato * Generate sources v1 clients Signed-off-by: Pierangelo Di Pilato * Add permissions to webhook for read + write CRDs Signed-off-by: Pierangelo Di Pilato --------- Signed-off-by: Pierangelo Di Pilato --- control-plane/cmd/webhook-kafka/main.go | 31 ++ .../100-source/100-kafka-source.yaml | 406 +++++++++++++++- .../200-webhook/100-webhook-cluster-role.yaml | 6 + control-plane/pkg/apis/bindings/v1/doc.go | 22 + control-plane/pkg/apis/bindings/v1/fuzzer.go | 45 ++ .../pkg/apis/bindings/v1/kafka_conversion.go | 44 ++ .../pkg/apis/bindings/v1/kafka_defaults.go | 29 ++ .../pkg/apis/bindings/v1/kafka_lifecycle.go | 225 +++++++++ .../pkg/apis/bindings/v1/kafka_types.go | 128 +++++ .../pkg/apis/bindings/v1/kafka_validation.go | 28 ++ .../pkg/apis/bindings/v1/register.go | 60 +++ .../apis/bindings/v1/zz_generated.deepcopy.go | 222 +++++++++ .../apis/bindings/v1/zz_generated.defaults.go | 33 ++ .../apis/bindings/v1beta1/kafka_conversion.go | 68 ++- control-plane/pkg/apis/sources/v1/doc.go | 22 + control-plane/pkg/apis/sources/v1/fuzzer.go | 45 ++ .../pkg/apis/sources/v1/implements_test.go | 39 ++ .../pkg/apis/sources/v1/kafka_conversion.go | 34 ++ .../pkg/apis/sources/v1/kafka_defaults.go | 80 ++++ .../pkg/apis/sources/v1/kafka_lifecycle.go | 186 ++++++++ .../pkg/apis/sources/v1/kafka_lifecycle_mt.go | 39 ++ .../pkg/apis/sources/v1/kafka_scheduling.go | 52 +++ .../pkg/apis/sources/v1/kafka_types.go | 169 +++++++ .../pkg/apis/sources/v1/kafka_validation.go | 88 ++++ 
.../apis/sources/v1/kafka_validation_test.go | 166 +++++++ control-plane/pkg/apis/sources/v1/order.go | 55 +++ control-plane/pkg/apis/sources/v1/register.go | 59 +++ .../apis/sources/v1/zz_generated.deepcopy.go | 144 ++++++ .../apis/sources/v1/zz_generated.defaults.go | 33 ++ .../apis/sources/v1beta1/kafka_conversion.go | 61 ++- .../apis/sources/v1beta1/kafka_defaults.go | 7 +- .../client/clientset/versioned/clientset.go | 26 ++ .../versioned/fake/clientset_generated.go | 14 + .../clientset/versioned/fake/register.go | 4 + .../clientset/versioned/scheme/register.go | 4 + .../typed/bindings/v1/bindings_client.go | 107 +++++ .../versioned/typed/bindings/v1/doc.go | 20 + .../versioned/typed/bindings/v1/fake/doc.go | 20 + .../bindings/v1/fake/fake_bindings_client.go | 40 ++ .../bindings/v1/fake/fake_kafkabinding.go | 141 ++++++ .../typed/bindings/v1/generated_expansion.go | 21 + .../typed/bindings/v1/kafkabinding.go | 195 ++++++++ .../versioned/typed/sources/v1/doc.go | 20 + .../versioned/typed/sources/v1/fake/doc.go | 20 + .../typed/sources/v1/fake/fake_kafkasource.go | 164 +++++++ .../sources/v1/fake/fake_sources_client.go | 40 ++ .../typed/sources/v1/generated_expansion.go | 21 + .../versioned/typed/sources/v1/kafkasource.go | 228 +++++++++ .../typed/sources/v1/sources_client.go | 107 +++++ .../externalversions/bindings/interface.go | 8 + .../externalversions/bindings/v1/interface.go | 45 ++ .../bindings/v1/kafkabinding.go | 90 ++++ .../informers/externalversions/generic.go | 12 +- .../externalversions/sources/interface.go | 8 + .../externalversions/sources/v1/interface.go | 45 ++ .../sources/v1/kafkasource.go | 90 ++++ .../sources/v1/kafkasource/fake/fake.go | 40 ++ .../v1/kafkasource/filtered/fake/fake.go | 52 +++ .../v1/kafkasource/filtered/kafkasource.go | 65 +++ .../sources/v1/kafkasource/kafkasource.go | 52 +++ .../sources/v1/kafkasource/controller.go | 170 +++++++ .../sources/v1/kafkasource/reconciler.go | 440 ++++++++++++++++++ 
.../sources/v1/kafkasource/state.go | 97 ++++ .../bindings/v1/expansion_generated.go | 27 ++ .../listers/bindings/v1/kafkabinding.go | 99 ++++ .../listers/sources/v1/expansion_generated.go | 27 ++ .../client/listers/sources/v1/kafkasource.go | 99 ++++ hack/update-codegen.sh | 2 +- .../apiextensions/interface.go | 54 +++ .../v1/customresourcedefinition.go | 89 ++++ .../apiextensions/v1/interface.go | 45 ++ .../v1beta1/customresourcedefinition.go | 89 ++++ .../apiextensions/v1beta1/interface.go | 45 ++ .../informers/externalversions/factory.go | 261 +++++++++++ .../informers/externalversions/generic.go | 67 +++ .../internalinterfaces/factory_interfaces.go | 40 ++ .../v1beta1/customresourcedefinition.go | 68 +++ .../v1beta1/expansion_generated.go | 23 + .../k8s.io/code-generator/generate-groups.sh | 0 .../generate-internal-groups.sh | 0 .../customresourcedefinition.go | 52 +++ .../informers/factory/factory.go | 56 +++ .../knative.dev/pkg/hack/generate-knative.sh | 0 .../conversion/controller.go | 162 +++++++ .../conversion/conversion.go | 206 ++++++++ .../resourcesemantics/conversion/options.go | 49 ++ .../conversion/reconciler.go | 121 +++++ vendor/modules.txt | 9 + 88 files changed, 6696 insertions(+), 26 deletions(-) create mode 100644 control-plane/pkg/apis/bindings/v1/doc.go create mode 100644 control-plane/pkg/apis/bindings/v1/fuzzer.go create mode 100644 control-plane/pkg/apis/bindings/v1/kafka_conversion.go create mode 100644 control-plane/pkg/apis/bindings/v1/kafka_defaults.go create mode 100644 control-plane/pkg/apis/bindings/v1/kafka_lifecycle.go create mode 100644 control-plane/pkg/apis/bindings/v1/kafka_types.go create mode 100644 control-plane/pkg/apis/bindings/v1/kafka_validation.go create mode 100644 control-plane/pkg/apis/bindings/v1/register.go create mode 100644 control-plane/pkg/apis/bindings/v1/zz_generated.deepcopy.go create mode 100644 control-plane/pkg/apis/bindings/v1/zz_generated.defaults.go create mode 100644 
control-plane/pkg/apis/sources/v1/doc.go create mode 100644 control-plane/pkg/apis/sources/v1/fuzzer.go create mode 100644 control-plane/pkg/apis/sources/v1/implements_test.go create mode 100644 control-plane/pkg/apis/sources/v1/kafka_conversion.go create mode 100644 control-plane/pkg/apis/sources/v1/kafka_defaults.go create mode 100644 control-plane/pkg/apis/sources/v1/kafka_lifecycle.go create mode 100644 control-plane/pkg/apis/sources/v1/kafka_lifecycle_mt.go create mode 100644 control-plane/pkg/apis/sources/v1/kafka_scheduling.go create mode 100644 control-plane/pkg/apis/sources/v1/kafka_types.go create mode 100644 control-plane/pkg/apis/sources/v1/kafka_validation.go create mode 100644 control-plane/pkg/apis/sources/v1/kafka_validation_test.go create mode 100644 control-plane/pkg/apis/sources/v1/order.go create mode 100644 control-plane/pkg/apis/sources/v1/register.go create mode 100644 control-plane/pkg/apis/sources/v1/zz_generated.deepcopy.go create mode 100644 control-plane/pkg/apis/sources/v1/zz_generated.defaults.go create mode 100644 control-plane/pkg/client/clientset/versioned/typed/bindings/v1/bindings_client.go create mode 100644 control-plane/pkg/client/clientset/versioned/typed/bindings/v1/doc.go create mode 100644 control-plane/pkg/client/clientset/versioned/typed/bindings/v1/fake/doc.go create mode 100644 control-plane/pkg/client/clientset/versioned/typed/bindings/v1/fake/fake_bindings_client.go create mode 100644 control-plane/pkg/client/clientset/versioned/typed/bindings/v1/fake/fake_kafkabinding.go create mode 100644 control-plane/pkg/client/clientset/versioned/typed/bindings/v1/generated_expansion.go create mode 100644 control-plane/pkg/client/clientset/versioned/typed/bindings/v1/kafkabinding.go create mode 100644 control-plane/pkg/client/clientset/versioned/typed/sources/v1/doc.go create mode 100644 control-plane/pkg/client/clientset/versioned/typed/sources/v1/fake/doc.go create mode 100644 
control-plane/pkg/client/clientset/versioned/typed/sources/v1/fake/fake_kafkasource.go create mode 100644 control-plane/pkg/client/clientset/versioned/typed/sources/v1/fake/fake_sources_client.go create mode 100644 control-plane/pkg/client/clientset/versioned/typed/sources/v1/generated_expansion.go create mode 100644 control-plane/pkg/client/clientset/versioned/typed/sources/v1/kafkasource.go create mode 100644 control-plane/pkg/client/clientset/versioned/typed/sources/v1/sources_client.go create mode 100644 control-plane/pkg/client/informers/externalversions/bindings/v1/interface.go create mode 100644 control-plane/pkg/client/informers/externalversions/bindings/v1/kafkabinding.go create mode 100644 control-plane/pkg/client/informers/externalversions/sources/v1/interface.go create mode 100644 control-plane/pkg/client/informers/externalversions/sources/v1/kafkasource.go create mode 100644 control-plane/pkg/client/injection/informers/sources/v1/kafkasource/fake/fake.go create mode 100644 control-plane/pkg/client/injection/informers/sources/v1/kafkasource/filtered/fake/fake.go create mode 100644 control-plane/pkg/client/injection/informers/sources/v1/kafkasource/filtered/kafkasource.go create mode 100644 control-plane/pkg/client/injection/informers/sources/v1/kafkasource/kafkasource.go create mode 100644 control-plane/pkg/client/injection/reconciler/sources/v1/kafkasource/controller.go create mode 100644 control-plane/pkg/client/injection/reconciler/sources/v1/kafkasource/reconciler.go create mode 100644 control-plane/pkg/client/injection/reconciler/sources/v1/kafkasource/state.go create mode 100644 control-plane/pkg/client/listers/bindings/v1/expansion_generated.go create mode 100644 control-plane/pkg/client/listers/bindings/v1/kafkabinding.go create mode 100644 control-plane/pkg/client/listers/sources/v1/expansion_generated.go create mode 100644 control-plane/pkg/client/listers/sources/v1/kafkasource.go create mode 100644 
vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/interface.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1/customresourcedefinition.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1/interface.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1beta1/customresourcedefinition.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1beta1/interface.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/factory.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/generic.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1beta1/customresourcedefinition.go create mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1beta1/expansion_generated.go mode change 100644 => 100755 vendor/k8s.io/code-generator/generate-groups.sh mode change 100644 => 100755 vendor/k8s.io/code-generator/generate-internal-groups.sh create mode 100644 vendor/knative.dev/pkg/client/injection/apiextensions/informers/apiextensions/v1/customresourcedefinition/customresourcedefinition.go create mode 100644 vendor/knative.dev/pkg/client/injection/apiextensions/informers/factory/factory.go mode change 100644 => 100755 vendor/knative.dev/pkg/hack/generate-knative.sh create mode 100644 vendor/knative.dev/pkg/webhook/resourcesemantics/conversion/controller.go create mode 100644 vendor/knative.dev/pkg/webhook/resourcesemantics/conversion/conversion.go create mode 100644 
vendor/knative.dev/pkg/webhook/resourcesemantics/conversion/options.go create mode 100644 vendor/knative.dev/pkg/webhook/resourcesemantics/conversion/reconciler.go diff --git a/control-plane/cmd/webhook-kafka/main.go b/control-plane/cmd/webhook-kafka/main.go index 96ed85842c..6fe3070d4f 100644 --- a/control-plane/cmd/webhook-kafka/main.go +++ b/control-plane/cmd/webhook-kafka/main.go @@ -30,12 +30,14 @@ import ( "knative.dev/pkg/webhook" "knative.dev/pkg/webhook/certificates" "knative.dev/pkg/webhook/resourcesemantics" + "knative.dev/pkg/webhook/resourcesemantics/conversion" "knative.dev/pkg/webhook/resourcesemantics/defaulting" "knative.dev/pkg/webhook/resourcesemantics/validation" eventingcorev1 "knative.dev/eventing/pkg/apis/eventing/v1" "knative.dev/eventing/pkg/apis/feature" + sourcesv1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/sources/v1" sourcesv1beta1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/sources/v1beta1" messagingv1beta1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/messaging/v1beta1" @@ -53,6 +55,7 @@ const ( var types = map[schema.GroupVersionKind]resourcesemantics.GenericCRD{ eventingv1alpha1.SchemeGroupVersion.WithKind("KafkaSink"): &eventingv1alpha1.KafkaSink{}, sourcesv1beta1.SchemeGroupVersion.WithKind("KafkaSource"): &sourcesv1beta1.KafkaSource{}, + sourcesv1.SchemeGroupVersion.WithKind("KafkaSource"): &sourcesv1.KafkaSource{}, messagingv1beta1.SchemeGroupVersion.WithKind("KafkaChannel"): &messagingv1beta1.KafkaChannel{}, eventingcorev1.SchemeGroupVersion.WithKind("Broker"): &eventingv1.BrokerStub{}, kafkainternals.SchemeGroupVersion.WithKind("ConsumerGroup"): &kafkainternals.ConsumerGroup{}, @@ -144,6 +147,33 @@ func NewValidationAdmissionController(ctx context.Context, cmw configmap.Watcher ) } +func NewConversionController(ctx context.Context, _ configmap.Watcher) *controller.Impl { + + ctxFunc := func(ctx context.Context) context.Context { + return ctx + } + + return 
conversion.NewConversionController( + ctx, + + // The path on which to serve the webhook + "/resource-conversion", + + map[schema.GroupKind]conversion.GroupKindConversion{ + sourcesv1.Kind("KafkaSource"): { + DefinitionName: "kafkasources.sources.knative.dev", + HubVersion: sourcesv1beta1.SchemeGroupVersion.Version, + Zygotes: map[string]conversion.ConvertibleObject{ + sourcesv1beta1.SchemeGroupVersion.Version: &sourcesv1beta1.KafkaSource{}, + sourcesv1.SchemeGroupVersion.Version: &sourcesv1.KafkaSource{}, + }, + }, + }, + // A function that infuses the context passed to ConvertTo/ConvertFrom/SetDefaults with custom metadata. + ctxFunc, + ) +} + func main() { // Set up a signal context with our webhook options @@ -159,5 +189,6 @@ func main() { NewDefaultingAdmissionController, NewPodDefaultingAdmissionController, NewValidationAdmissionController, + NewConversionController, ) } diff --git a/control-plane/config/eventing-kafka-broker/100-source/100-kafka-source.yaml b/control-plane/config/eventing-kafka-broker/100-source/100-kafka-source.yaml index ac5391ba80..917d60ed69 100644 --- a/control-plane/config/eventing-kafka-broker/100-source/100-kafka-source.yaml +++ b/control-plane/config/eventing-kafka-broker/100-source/100-kafka-source.yaml @@ -433,6 +433,410 @@ spec: - name: Age type: date jsonPath: .metadata.creationTimestamp + - name: v1 + served: true + storage: false + schema: + openAPIV3Schema: + description: KafkaSource is the Schema for the kafkasources API. + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. 
Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: KafkaSourceSpec defines the desired state of the KafkaSource. + type: object + required: + - bootstrapServers + - topics + properties: + bootstrapServers: + description: Bootstrap servers are the Kafka servers the consumer will connect to. + type: array + items: + type: string + ceOverrides: + description: CloudEventOverrides defines overrides to control the output format and modifications of the event sent to the sink. + type: object + properties: + extensions: + description: Extensions specify what attribute are added or overridden on the outbound event. Each `Extensions` key-value pair are set on the event as an attribute extension independently. + type: object + additionalProperties: + type: string + consumerGroup: + description: ConsumerGroupID is the consumer group ID. + type: string + consumers: + description: "Number of desired consumers running in the consumer group. Defaults to 1. \n This is a pointer to distinguish between explicit zero and not specified." + type: integer + format: int32 + delivery: + description: Delivery contains the delivery spec for this source + type: object + properties: + backoffDelay: + description: "BackoffDelay is the delay before retrying. More information on Duration format: - https://www.iso.org/iso-8601-date-and-time-format.html - https://en.wikipedia.org/wiki/ISO_8601 \n For linear policy, backoff delay is backoffDelay*. For exponential policy, backoff delay is backoffDelay*2^." + type: string + backoffPolicy: + description: BackoffPolicy is the retry backoff policy (linear, exponential). + type: string + deadLetterSink: + description: DeadLetterSink is the sink receiving event that could not be sent to a destination. 
+ type: object + properties: + ref: + description: Ref points to an Addressable. + type: object + required: + - kind + - name + properties: + address: + description: Address points to a specific Address Name. + type: string + apiVersion: + description: API version of the referent. + type: string + group: + description: 'Group of the API, without the version of the group. This can be used as an alternative to the APIVersion, and then resolved using ResolveGroup. Note: This API is EXPERIMENTAL and might break anytime. For more details: https://github.com/knative/eventing/issues/5086' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ This is optional field, it gets defaulted to the object holding it if left out.' + type: string + uri: + description: URI can be an absolute URL(non-empty scheme and non-empty host) pointing to the target or a relative URI. Relative URIs will be resolved using the base URI retrieved from Ref. + type: string + CACerts: + description: CACerts are Certification Authority (CA) certificates in PEM format according to https://www.rfc-editor.org/rfc/rfc7468. If set, these CAs are appended to the set of CAs provided by the Addressable target, if any. + type: string + audience: + description: Audience is the OIDC audience for the deadLetterSink. + type: string + retry: + description: Retry is the minimum number of retries the sender should attempt when sending an event before moving it to the dead letter sink. 
+ type: integer + format: int32 + retryAfterMax: + description: "RetryAfterMax provides an optional upper bound on the duration specified in a \"Retry-After\" header when calculating backoff times for retrying 429 and 503 response codes. Setting the value to zero (\"PT0S\") can be used to opt-out of respecting \"Retry-After\" header values altogether. This value only takes effect if \"Retry\" is configured, and also depends on specific implementations (Channels, Sources, etc.) choosing to provide this capability. \n Note: This API is EXPERIMENTAL and might be changed at anytime. While this experimental feature is in the Alpha/Beta stage, you must provide a valid value to opt-in for supporting \"Retry-After\" headers. When the feature becomes Stable/GA \"Retry-After\" headers will be respected by default, and you can choose to specify \"PT0S\" to opt-out of supporting \"Retry-After\" headers. For more details: https://github.com/knative/eventing/issues/5811 \n More information on Duration format: - https://www.iso.org/iso-8601-date-and-time-format.html - https://en.wikipedia.org/wiki/ISO_8601" + type: string + timeout: + description: "Timeout is the timeout of each single request. The value must be greater than 0. More information on Duration format: - https://www.iso.org/iso-8601-date-and-time-format.html - https://en.wikipedia.org/wiki/ISO_8601 \n Note: This API is EXPERIMENTAL and might break anytime. For more details: https://github.com/knative/eventing/issues/5148" + type: string + initialOffset: + description: InitialOffset is the Initial Offset for the consumer group. should be earliest or latest + type: string + net: + type: object + properties: + sasl: + type: object + properties: + enable: + type: boolean + password: + description: Password is the Kubernetes secret containing the SASL password. + type: object + properties: + secretKeyRef: + description: The Secret key to select from. 
+ type: object + required: + - key + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + x-kubernetes-map-type: atomic + type: + description: Type of saslType, defaults to plain (vs SCRAM-SHA-512 or SCRAM-SHA-256) + type: object + properties: + secretKeyRef: + description: The Secret key to select from. + type: object + required: + - key + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + x-kubernetes-map-type: atomic + user: + description: User is the Kubernetes secret containing the SASL username. + type: object + properties: + secretKeyRef: + description: The Secret key to select from. + type: object + required: + - key + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + x-kubernetes-map-type: atomic + tls: + type: object + properties: + caCert: + description: CACert is the Kubernetes secret containing the server CA cert. 
+ type: object + properties: + secretKeyRef: + description: The Secret key to select from. + type: object + required: + - key + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + x-kubernetes-map-type: atomic + cert: + description: Cert is the Kubernetes secret containing the client certificate. + type: object + properties: + secretKeyRef: + description: The Secret key to select from. + type: object + required: + - key + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + x-kubernetes-map-type: atomic + enable: + type: boolean + key: + description: Key is the Kubernetes secret containing the client key. + type: object + properties: + secretKeyRef: + description: The Secret key to select from. + type: object + required: + - key + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + x-kubernetes-map-type: atomic + ordering: + description: Ordering is the type of the consumer verticle. 
Should be ordered or unordered. By default, it is ordered. + type: string + sink: + description: Sink is a reference to an object that will resolve to a uri to use as the sink. + type: object + properties: + ref: + description: Ref points to an Addressable. + type: object + required: + - kind + - name + properties: + address: + description: Address points to a specific Address Name. + type: string + apiVersion: + description: API version of the referent. + type: string + group: + description: 'Group of the API, without the version of the group. This can be used as an alternative to the APIVersion, and then resolved using ResolveGroup. Note: This API is EXPERIMENTAL and might break anytime. For more details: https://github.com/knative/eventing/issues/5086' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ This is optional field, it gets defaulted to the object holding it if left out.' + type: string + uri: + description: URI can be an absolute URL(non-empty scheme and non-empty host) pointing to the target or a relative URI. Relative URIs will be resolved using the base URI retrieved from Ref. + type: string + CACerts: + description: CACerts are Certification Authority (CA) certificates in PEM format according to https://www.rfc-editor.org/rfc/rfc7468. If set, these CAs are appended to the set of CAs provided by the Addressable target, if any. + type: string + audience: + description: Audience is the OIDC audience for the sink. 
+ type: string + topics: + description: Topic topics to consume messages from + type: array + items: + type: string + status: + description: KafkaSourceStatus defines the observed state of KafkaSource. + type: object + properties: + annotations: + description: Annotations is additional Status fields for the Resource to save some additional State as well as convey more information to the user. This is roughly akin to Annotations on any k8s resource, just the reconciler conveying richer information outwards. + type: object + additionalProperties: + type: string + ceAttributes: + description: CloudEventAttributes are the specific attributes that the Source uses as part of its CloudEvents. + type: array + items: + description: CloudEventAttributes specifies the attributes that a Source uses as part of its CloudEvents. + type: object + properties: + source: + description: Source is the CloudEvents source attribute. + type: string + type: + description: Type refers to the CloudEvent type attribute. + type: string + claims: + description: Claims consumed by this KafkaSource instance + type: string + conditions: + description: Conditions the latest available observations of a resource's current state. + type: array + items: + description: 'Condition defines a readiness condition for a Knative resource. See: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties' + type: object + required: + - status + - type + properties: + lastTransitionTime: + description: LastTransitionTime is the last time the condition transitioned from one status to another. We use VolatileTime in place of metav1.Time to exclude this from creating equality.Semantic differences (all other things held constant). + type: string + message: + description: A human readable message indicating details about the transition. + type: string + reason: + description: The reason for the condition's last transition. 
+ type: string + severity: + description: Severity with which to treat failures of this type of condition. When this is not specified, it defaults to Error. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of condition. + type: string + consumers: + description: Total number of consumers actually running in the consumer group. + type: integer + format: int32 + maxAllowedVReplicas: + type: integer + format: int32 + observedGeneration: + description: ObservedGeneration is the 'Generation' of the Service that was last processed by the controller. + type: integer + format: int64 + placements: + type: array + items: + type: object + properties: + podName: + description: PodName is the name of the pod where the resource is placed + type: string + vreplicas: + description: VReplicas is the number of virtual replicas assigned to in the pod + type: integer + format: int32 + selector: + description: Use for labelSelectorPath when scaling Kafka source + type: string + sinkCACerts: + description: SinkCACerts are Certification Authority (CA) certificates in PEM format according to https://www.rfc-editor.org/rfc/rfc7468. + type: string + sinkUri: + description: SinkURI is the current active sink URI that has been configured for the Source. + type: string + sinkAudience: + description: SinkAudience is the OIDC audience of the sink. + type: string + auth: + description: Auth provides the relevant information for OIDC authentication. + type: object + properties: + serviceAccountName: + description: ServiceAccountName is the name of the generated service account used for this components OIDC authentication. + type: string + subresources: + status: {} + scale: + # specReplicasPath defines the JSONPath inside of a custom resource that corresponds to Scale.Spec.Replicas. 
+ specReplicasPath: .spec.consumers + # statusReplicasPath defines the JSONPath inside of a custom resource that corresponds to Scale.Status.Replicas. + statusReplicasPath: .status.consumers + # labelSelectorPath defines the JSONPath inside of a custom resource that corresponds to Scale.Status.Selector + labelSelectorPath: .status.selector + additionalPrinterColumns: + - name: Topics + type: string + jsonPath: ".spec.topics" + - name: BootstrapServers + type: string + jsonPath: ".spec.bootstrapServers" + - name: Ready + type: string + jsonPath: ".status.conditions[?(@.type==\"Ready\")].status" + - name: Reason + type: string + jsonPath: ".status.conditions[?(@.type==\"Ready\")].reason" + - name: Age + type: date + jsonPath: .metadata.creationTimestamp names: categories: - all @@ -448,5 +852,5 @@ spec: conversionReviewVersions: ["v1", "v1beta1"] clientConfig: service: - name: kafka-source-webhook + name: kafka-webhook-eventing namespace: knative-eventing diff --git a/control-plane/config/eventing-kafka-broker/200-webhook/100-webhook-cluster-role.yaml b/control-plane/config/eventing-kafka-broker/200-webhook/100-webhook-cluster-role.yaml index 362241535d..5d644c321d 100644 --- a/control-plane/config/eventing-kafka-broker/200-webhook/100-webhook-cluster-role.yaml +++ b/control-plane/config/eventing-kafka-broker/200-webhook/100-webhook-cluster-role.yaml @@ -108,3 +108,9 @@ rules: verbs: - get - list + + # Necessary for conversion webhook. 
+ - apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["get", "list", "create", "update", "patch", "watch"] + diff --git a/control-plane/pkg/apis/bindings/v1/doc.go b/control-plane/pkg/apis/bindings/v1/doc.go new file mode 100644 index 0000000000..747d67b949 --- /dev/null +++ b/control-plane/pkg/apis/bindings/v1/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1 contains API Schema definitions for the sources v1 API group +// +k8s:openapi-gen=true +// +k8s:deepcopy-gen=package,register +// +k8s:defaulter-gen=TypeMeta +// +groupName=bindings.knative.dev +package v1 diff --git a/control-plane/pkg/apis/bindings/v1/fuzzer.go b/control-plane/pkg/apis/bindings/v1/fuzzer.go new file mode 100644 index 0000000000..4c1837b750 --- /dev/null +++ b/control-plane/pkg/apis/bindings/v1/fuzzer.go @@ -0,0 +1,45 @@ +/* +Copyright 2020 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + fuzz "github.com/google/gofuzz" + "k8s.io/apimachinery/pkg/api/apitesting/fuzzer" + "k8s.io/apimachinery/pkg/runtime/serializer" + pkgfuzzer "knative.dev/pkg/apis/testing/fuzzer" +) + +// FuzzerFuncs includes fuzzing funcs for bindings.knative.dev v1 types +// +// For other examples see +// https://github.com/kubernetes/apimachinery/blob/master/pkg/apis/meta/fuzzer/fuzzer.go +var FuzzerFuncs = fuzzer.MergeFuzzerFuncs( + func(codecs serializer.CodecFactory) []interface{} { + return []interface{}{ + func(s *KafkaBindingStatus, c fuzz.Continue) { + c.FuzzNoCustom(s) // fuzz the status object + + // Clear the random fuzzed condition + s.Status.SetConditions(nil) + + // Fuzz the known conditions except their type value + s.InitializeConditions() + pkgfuzzer.FuzzConditions(&s.Status, c) + }, + } + }, +) diff --git a/control-plane/pkg/apis/bindings/v1/kafka_conversion.go b/control-plane/pkg/apis/bindings/v1/kafka_conversion.go new file mode 100644 index 0000000000..c15243f905 --- /dev/null +++ b/control-plane/pkg/apis/bindings/v1/kafka_conversion.go @@ -0,0 +1,44 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "context" + "fmt" + + "knative.dev/pkg/apis" +) + +// ConvertTo implements apis.Convertible +func (source *KafkaBinding) ConvertTo(_ context.Context, sink apis.Convertible) error { + return fmt.Errorf("v1 is the highest known version, got: %T", sink) +} + +// ConvertFrom implements apis.Convertible +func (sink *KafkaBinding) ConvertFrom(_ context.Context, source apis.Convertible) error { + return fmt.Errorf("v1 is the highest known version, got: %T", source) +} + +// ConvertTo implements apis.Convertible +func (source *KafkaAuthSpec) ConvertTo(_ context.Context, sink apis.Convertible) error { + return fmt.Errorf("v1 is the highest known version, got: %T", sink) +} + +// ConvertFrom implements apis.Convertible +func (sink *KafkaAuthSpec) ConvertFrom(_ context.Context, source apis.Convertible) error { + return fmt.Errorf("v1 is the highest known version, got: %T", source) +} diff --git a/control-plane/pkg/apis/bindings/v1/kafka_defaults.go b/control-plane/pkg/apis/bindings/v1/kafka_defaults.go new file mode 100644 index 0000000000..7075506ce4 --- /dev/null +++ b/control-plane/pkg/apis/bindings/v1/kafka_defaults.go @@ -0,0 +1,29 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "context" +) + +// SetDefaults ensures KafkaBinding reflects the default values. 
+func (r *KafkaBinding) SetDefaults(ctx context.Context) { + if r.Spec.Subject.Namespace == "" { + // Default the subject's namespace to our namespace. + r.Spec.Subject.Namespace = r.Namespace + } +} diff --git a/control-plane/pkg/apis/bindings/v1/kafka_lifecycle.go b/control-plane/pkg/apis/bindings/v1/kafka_lifecycle.go new file mode 100644 index 0000000000..3afb73d01b --- /dev/null +++ b/control-plane/pkg/apis/bindings/v1/kafka_lifecycle.go @@ -0,0 +1,225 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "context" + "strings" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "knative.dev/pkg/apis" + "knative.dev/pkg/apis/duck" + duckv1 "knative.dev/pkg/apis/duck/v1" + "knative.dev/pkg/tracker" +) + +var kfbCondSet = apis.NewLivingConditionSet() + +// GetGroupVersionKind returns the GroupVersionKind. +func (*KafkaBinding) GetGroupVersionKind() schema.GroupVersionKind { + return SchemeGroupVersion.WithKind("KafkaBinding") +} + +// GetConditionSet retrieves the condition set for this resource. Implements the KRShaped interface. 
+func (*KafkaBinding) GetConditionSet() apis.ConditionSet { + return kfbCondSet +} + +// GetUntypedSpec implements apis.HasSpec +func (s *KafkaBinding) GetUntypedSpec() interface{} { + return s.Spec +} + +// GetSubject implements psbinding.Bindable +func (sb *KafkaBinding) GetSubject() tracker.Reference { + return sb.Spec.Subject +} + +// GetBindingStatus implements psbinding.Bindable +func (sb *KafkaBinding) GetBindingStatus() duck.BindableStatus { + return &sb.Status +} + +// SetObservedGeneration implements psbinding.BindableStatus +func (sbs *KafkaBindingStatus) SetObservedGeneration(gen int64) { + sbs.ObservedGeneration = gen +} + +// InitializeConditions populates the KafkaBindingStatus's conditions field +// with all of its conditions configured to Unknown. +func (sbs *KafkaBindingStatus) InitializeConditions() { + kfbCondSet.Manage(sbs).InitializeConditions() +} + +// MarkBindingUnavailable marks the KafkaBinding's Ready condition to False with +// the provided reason and message. +func (sbs *KafkaBindingStatus) MarkBindingUnavailable(reason, message string) { + kfbCondSet.Manage(sbs).MarkFalse(KafkaBindingConditionReady, reason, message) +} + +// MarkBindingAvailable marks the KafkaBinding's Ready condition to True. +func (sbs *KafkaBindingStatus) MarkBindingAvailable() { + kfbCondSet.Manage(sbs).MarkTrue(KafkaBindingConditionReady) +} + +// Do implements psbinding.Bindable +func (kfb *KafkaBinding) Do(ctx context.Context, ps *duckv1.WithPod) { + // First undo so that we can just unconditionally append below. 
+ kfb.Undo(ctx, ps) + + spec := ps.Spec.Template.Spec + for i := range spec.InitContainers { + spec.InitContainers[i].Env = append(spec.InitContainers[i].Env, corev1.EnvVar{ + Name: "KAFKA_BOOTSTRAP_SERVERS", + Value: strings.Join(kfb.Spec.BootstrapServers, ","), + }) + if kfb.Spec.Net.SASL.Enable { + spec.InitContainers[i].Env = append(spec.InitContainers[i].Env, corev1.EnvVar{ + Name: "KAFKA_NET_SASL_ENABLE", + Value: "true", + }, corev1.EnvVar{ + Name: "KAFKA_NET_SASL_USER", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: kfb.Spec.Net.SASL.User.SecretKeyRef, + }, + }, corev1.EnvVar{ + Name: "KAFKA_NET_SASL_PASSWORD", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: kfb.Spec.Net.SASL.Password.SecretKeyRef, + }, + }, corev1.EnvVar{ + Name: "KAFKA_NET_SASL_TYPE", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: kfb.Spec.Net.SASL.Type.SecretKeyRef, + }, + }) + } + if kfb.Spec.Net.TLS.Enable { + spec.InitContainers[i].Env = append(spec.InitContainers[i].Env, corev1.EnvVar{ + Name: "KAFKA_NET_TLS_ENABLE", + Value: "true", + }, corev1.EnvVar{ + Name: "KAFKA_NET_TLS_CERT", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: kfb.Spec.Net.TLS.Cert.SecretKeyRef, + }, + }, corev1.EnvVar{ + Name: "KAFKA_NET_TLS_KEY", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: kfb.Spec.Net.TLS.Key.SecretKeyRef, + }, + }, corev1.EnvVar{ + Name: "KAFKA_NET_TLS_CA_CERT", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: kfb.Spec.Net.TLS.CACert.SecretKeyRef, + }, + }) + } + } + + for i := range spec.Containers { + spec.Containers[i].Env = append(spec.Containers[i].Env, corev1.EnvVar{ + Name: "KAFKA_BOOTSTRAP_SERVERS", + Value: strings.Join(kfb.Spec.BootstrapServers, ","), + }) + + if kfb.Spec.Net.SASL.Enable { + spec.Containers[i].Env = append(spec.Containers[i].Env, corev1.EnvVar{ + Name: "KAFKA_NET_SASL_ENABLE", + Value: "true", + }, corev1.EnvVar{ + Name: "KAFKA_NET_SASL_USER", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: kfb.Spec.Net.SASL.User.SecretKeyRef, + }, + }, 
corev1.EnvVar{ + Name: "KAFKA_NET_SASL_PASSWORD", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: kfb.Spec.Net.SASL.Password.SecretKeyRef, + }, + }, corev1.EnvVar{ + Name: "KAFKA_NET_SASL_TYPE", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: kfb.Spec.Net.SASL.Type.SecretKeyRef, + }, + }) + } + if kfb.Spec.Net.TLS.Enable { + spec.Containers[i].Env = append(spec.Containers[i].Env, corev1.EnvVar{ + Name: "KAFKA_NET_TLS_ENABLE", + Value: "true", + }, corev1.EnvVar{ + Name: "KAFKA_NET_TLS_CERT", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: kfb.Spec.Net.TLS.Cert.SecretKeyRef, + }, + }, corev1.EnvVar{ + Name: "KAFKA_NET_TLS_KEY", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: kfb.Spec.Net.TLS.Key.SecretKeyRef, + }, + }, corev1.EnvVar{ + Name: "KAFKA_NET_TLS_CA_CERT", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: kfb.Spec.Net.TLS.CACert.SecretKeyRef, + }, + }) + } + } +} + +func (kfb *KafkaBinding) Undo(ctx context.Context, ps *duckv1.WithPod) { + spec := ps.Spec.Template.Spec + + for i, c := range spec.InitContainers { + if len(c.Env) == 0 { + continue + } + env := make([]corev1.EnvVar, 0, len(spec.InitContainers[i].Env)) + for j, ev := range c.Env { + switch ev.Name { + case "KAFKA_NET_TLS_ENABLE", "KAFKA_NET_TLS_CERT", "KAFKA_NET_TLS_KEY", "KAFKA_NET_TLS_CA_CERT", + "KAFKA_NET_SASL_ENABLE", "KAFKA_NET_SASL_USER", "KAFKA_NET_SASL_PASSWORD", "KAFKA_NET_SASL_TYPE", + "KAFKA_BOOTSTRAP_SERVERS": + + continue + default: + env = append(env, spec.InitContainers[i].Env[j]) + } + } + spec.InitContainers[i].Env = env + } + + for i, c := range spec.Containers { + if len(c.Env) == 0 { + continue + } + env := make([]corev1.EnvVar, 0, len(spec.Containers[i].Env)) + for j, ev := range c.Env { + switch ev.Name { + case "KAFKA_NET_TLS_ENABLE", "KAFKA_NET_TLS_CERT", "KAFKA_NET_TLS_KEY", "KAFKA_NET_TLS_CA_CERT", + "KAFKA_NET_SASL_ENABLE", "KAFKA_NET_SASL_USER", "KAFKA_NET_SASL_PASSWORD", "KAFKA_NET_SASL_TYPE", + "KAFKA_BOOTSTRAP_SERVERS": + continue + default: + env = 
append(env, spec.Containers[i].Env[j]) + } + } + spec.Containers[i].Env = env + } +} diff --git a/control-plane/pkg/apis/bindings/v1/kafka_types.go b/control-plane/pkg/apis/bindings/v1/kafka_types.go new file mode 100644 index 0000000000..0eb65cc72d --- /dev/null +++ b/control-plane/pkg/apis/bindings/v1/kafka_types.go @@ -0,0 +1,128 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "knative.dev/pkg/apis" + duckv1 "knative.dev/pkg/apis/duck/v1" + duckv1alpha1 "knative.dev/pkg/apis/duck/v1alpha1" + "knative.dev/pkg/kmeta" + "knative.dev/pkg/webhook/resourcesemantics" +) + +// +genclient +// +genreconciler +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// KafkaBinding is the Schema for the kafkabindings API. +// +k8s:openapi-gen=true +type KafkaBinding struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec KafkaBindingSpec `json:"spec,omitempty"` + Status KafkaBindingStatus `json:"status,omitempty"` +} + +// Check that KafkaBinding can be validated and can be defaulted. 
+var _ runtime.Object = (*KafkaBinding)(nil) +var _ resourcesemantics.GenericCRD = (*KafkaBinding)(nil) +var _ kmeta.OwnerRefable = (*KafkaBinding)(nil) +var _ duckv1.KRShaped = (*KafkaBinding)(nil) + +type KafkaSASLSpec struct { + Enable bool `json:"enable,omitempty"` + + // User is the Kubernetes secret containing the SASL username. + // +optional + User SecretValueFromSource `json:"user,omitempty"` + + // Password is the Kubernetes secret containing the SASL password. + // +optional + Password SecretValueFromSource `json:"password,omitempty"` + + // Type of saslType, defaults to plain (vs SCRAM-SHA-512 or SCRAM-SHA-256) + // +optional + Type SecretValueFromSource `json:"type,omitempty"` +} + +type KafkaTLSSpec struct { + Enable bool `json:"enable,omitempty"` + + // Cert is the Kubernetes secret containing the client certificate. + // +optional + Cert SecretValueFromSource `json:"cert,omitempty"` + // Key is the Kubernetes secret containing the client key. + // +optional + Key SecretValueFromSource `json:"key,omitempty"` + // CACert is the Kubernetes secret containing the server CA cert. + // +optional + CACert SecretValueFromSource `json:"caCert,omitempty"` +} + +// SecretValueFromSource represents the source of a secret value +type SecretValueFromSource struct { + // The Secret key to select from. + SecretKeyRef *corev1.SecretKeySelector `json:"secretKeyRef,omitempty"` +} + +type KafkaNetSpec struct { + SASL KafkaSASLSpec `json:"sasl,omitempty"` + TLS KafkaTLSSpec `json:"tls,omitempty"` +} + +type KafkaAuthSpec struct { + // Bootstrap servers are the Kafka servers the consumer will connect to. + // +required + BootstrapServers []string `json:"bootstrapServers"` + + Net KafkaNetSpec `json:"net,omitempty"` +} + +// KafkaBindingSpec defines the desired state of the KafkaBinding. 
+type KafkaBindingSpec struct { + duckv1alpha1.BindingSpec `json:",inline"` + + KafkaAuthSpec `json:",inline"` +} + +const ( + // KafkaBindingConditionReady is configured to indicate whether the Binding + // has been configured for resources subject to its runtime contract. + KafkaBindingConditionReady = apis.ConditionReady +) + +// KafkaBindingStatus defines the observed state of KafkaBinding. +type KafkaBindingStatus struct { + duckv1.Status `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// KafkaBindingList contains a list of KafkaBindings. +type KafkaBindingList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []KafkaBinding `json:"items"` +} + +// GetStatus retrieves the duck status for this resource. Implements the KRShaped interface. +func (k *KafkaBinding) GetStatus() *duckv1.Status { + return &k.Status.Status +} diff --git a/control-plane/pkg/apis/bindings/v1/kafka_validation.go b/control-plane/pkg/apis/bindings/v1/kafka_validation.go new file mode 100644 index 0000000000..7ef699a680 --- /dev/null +++ b/control-plane/pkg/apis/bindings/v1/kafka_validation.go @@ -0,0 +1,28 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "context" + + "knative.dev/pkg/apis" +) + +// Validate ensures KafkaBinding is properly configured. 
+func (r *KafkaBinding) Validate(ctx context.Context) *apis.FieldError { + return nil +} diff --git a/control-plane/pkg/apis/bindings/v1/register.go b/control-plane/pkg/apis/bindings/v1/register.go new file mode 100644 index 0000000000..a60ecd433a --- /dev/null +++ b/control-plane/pkg/apis/bindings/v1/register.go @@ -0,0 +1,60 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// NOTE: Boilerplate only. Ignore this file. + +// Package v1 contains API Schema definitions for the bindings v1 API group +// +k8s:openapi-gen=true +// +k8s:deepcopy-gen=package,register +// +k8s:defaulter-gen=TypeMeta +// +groupName=bindings.knative.dev +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/bindings" +) + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: bindings.GroupName, Version: "v1"} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = 
runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &KafkaBinding{}, + &KafkaBindingList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/control-plane/pkg/apis/bindings/v1/zz_generated.deepcopy.go b/control-plane/pkg/apis/bindings/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..a7db6ded2e --- /dev/null +++ b/control-plane/pkg/apis/bindings/v1/zz_generated.deepcopy.go @@ -0,0 +1,222 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* + * Copyright 2021 The Knative Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaAuthSpec) DeepCopyInto(out *KafkaAuthSpec) { + *out = *in + if in.BootstrapServers != nil { + in, out := &in.BootstrapServers, &out.BootstrapServers + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.Net.DeepCopyInto(&out.Net) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaAuthSpec. 
+func (in *KafkaAuthSpec) DeepCopy() *KafkaAuthSpec { + if in == nil { + return nil + } + out := new(KafkaAuthSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaBinding) DeepCopyInto(out *KafkaBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaBinding. +func (in *KafkaBinding) DeepCopy() *KafkaBinding { + if in == nil { + return nil + } + out := new(KafkaBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KafkaBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaBindingList) DeepCopyInto(out *KafkaBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KafkaBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaBindingList. +func (in *KafkaBindingList) DeepCopy() *KafkaBindingList { + if in == nil { + return nil + } + out := new(KafkaBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *KafkaBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaBindingSpec) DeepCopyInto(out *KafkaBindingSpec) { + *out = *in + in.BindingSpec.DeepCopyInto(&out.BindingSpec) + in.KafkaAuthSpec.DeepCopyInto(&out.KafkaAuthSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaBindingSpec. +func (in *KafkaBindingSpec) DeepCopy() *KafkaBindingSpec { + if in == nil { + return nil + } + out := new(KafkaBindingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaBindingStatus) DeepCopyInto(out *KafkaBindingStatus) { + *out = *in + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaBindingStatus. +func (in *KafkaBindingStatus) DeepCopy() *KafkaBindingStatus { + if in == nil { + return nil + } + out := new(KafkaBindingStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaNetSpec) DeepCopyInto(out *KafkaNetSpec) { + *out = *in + in.SASL.DeepCopyInto(&out.SASL) + in.TLS.DeepCopyInto(&out.TLS) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaNetSpec. +func (in *KafkaNetSpec) DeepCopy() *KafkaNetSpec { + if in == nil { + return nil + } + out := new(KafkaNetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KafkaSASLSpec) DeepCopyInto(out *KafkaSASLSpec) { + *out = *in + in.User.DeepCopyInto(&out.User) + in.Password.DeepCopyInto(&out.Password) + in.Type.DeepCopyInto(&out.Type) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaSASLSpec. +func (in *KafkaSASLSpec) DeepCopy() *KafkaSASLSpec { + if in == nil { + return nil + } + out := new(KafkaSASLSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaTLSSpec) DeepCopyInto(out *KafkaTLSSpec) { + *out = *in + in.Cert.DeepCopyInto(&out.Cert) + in.Key.DeepCopyInto(&out.Key) + in.CACert.DeepCopyInto(&out.CACert) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaTLSSpec. +func (in *KafkaTLSSpec) DeepCopy() *KafkaTLSSpec { + if in == nil { + return nil + } + out := new(KafkaTLSSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretValueFromSource) DeepCopyInto(out *SecretValueFromSource) { + *out = *in + if in.SecretKeyRef != nil { + in, out := &in.SecretKeyRef, &out.SecretKeyRef + *out = new(corev1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretValueFromSource. 
+func (in *SecretValueFromSource) DeepCopy() *SecretValueFromSource { + if in == nil { + return nil + } + out := new(SecretValueFromSource) + in.DeepCopyInto(out) + return out +} diff --git a/control-plane/pkg/apis/bindings/v1/zz_generated.defaults.go b/control-plane/pkg/apis/bindings/v1/zz_generated.defaults.go new file mode 100644 index 0000000000..a0005a51d2 --- /dev/null +++ b/control-plane/pkg/apis/bindings/v1/zz_generated.defaults.go @@ -0,0 +1,33 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* + * Copyright 2021 The Knative Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Code generated by defaulter-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. 
+func RegisterDefaults(scheme *runtime.Scheme) error { + return nil +} diff --git a/control-plane/pkg/apis/bindings/v1beta1/kafka_conversion.go b/control-plane/pkg/apis/bindings/v1beta1/kafka_conversion.go index 4465370e05..8135e11bb9 100644 --- a/control-plane/pkg/apis/bindings/v1beta1/kafka_conversion.go +++ b/control-plane/pkg/apis/bindings/v1beta1/kafka_conversion.go @@ -18,27 +18,63 @@ package v1beta1 import ( "context" - "fmt" - "knative.dev/pkg/apis" + v1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/bindings/v1" ) -// ConvertTo implements apis.Convertible -func (source *KafkaBinding) ConvertTo(_ context.Context, sink apis.Convertible) error { - return fmt.Errorf("v1beta1 is the highest known version, got: %T", sink) +// ConvertToV1 converts v1beta1 to v1. +func (source *KafkaAuthSpec) ConvertToV1(_ context.Context) *v1.KafkaAuthSpec { + if source == nil { + return nil + } + sink := &v1.KafkaAuthSpec{ + BootstrapServers: source.BootstrapServers, + Net: v1.KafkaNetSpec{ + SASL: v1.KafkaSASLSpec{ + Enable: source.Net.SASL.Enable, + User: v1.SecretValueFromSource{ + SecretKeyRef: source.Net.SASL.User.SecretKeyRef, + }, + Password: v1.SecretValueFromSource{ + SecretKeyRef: source.Net.SASL.Password.SecretKeyRef, + }, + Type: v1.SecretValueFromSource{ + SecretKeyRef: source.Net.SASL.Type.SecretKeyRef, + }, + }, + TLS: v1.KafkaTLSSpec{ + Enable: source.Net.TLS.Enable, + Cert: v1.SecretValueFromSource{ + SecretKeyRef: source.Net.TLS.Cert.SecretKeyRef, + }, + Key: v1.SecretValueFromSource{ + SecretKeyRef: source.Net.TLS.Key.SecretKeyRef, + }, + CACert: v1.SecretValueFromSource{ + SecretKeyRef: source.Net.TLS.CACert.SecretKeyRef, + }, + }, + }, + } + return sink } -// ConvertFrom implements apis.Convertible -func (sink *KafkaBinding) ConvertFrom(_ context.Context, source apis.Convertible) error { - return fmt.Errorf("v1beta1 is the highest known version, got: %T", source) -} +// ConvertFromV1 converts v1 to v1beta1 +func (sink *KafkaAuthSpec) 
ConvertFromV1(source *v1.KafkaAuthSpec) { + if source == nil { + return + } + sink.BootstrapServers = source.BootstrapServers -// ConvertTo implements apis.Convertible -func (source *KafkaAuthSpec) ConvertTo(_ context.Context, sink apis.Convertible) error { - return fmt.Errorf("v1beta1 is the highest known version, got: %T", sink) -} + sink.Net.SASL.Enable = source.Net.SASL.Enable + sink.Net.SASL.Type.SecretKeyRef = source.Net.SASL.Type.SecretKeyRef + sink.Net.SASL.User.SecretKeyRef = source.Net.SASL.User.SecretKeyRef + sink.Net.SASL.Password.SecretKeyRef = source.Net.SASL.Password.SecretKeyRef + + sink.Net.TLS.Enable = source.Net.TLS.Enable + sink.Net.TLS.Key.SecretKeyRef = source.Net.TLS.Key.SecretKeyRef + sink.Net.TLS.Cert.SecretKeyRef = source.Net.TLS.Cert.SecretKeyRef + sink.Net.TLS.CACert.SecretKeyRef = source.Net.TLS.CACert.SecretKeyRef -// ConvertFrom implements apis.Convertible -func (sink *KafkaAuthSpec) ConvertFrom(_ context.Context, source apis.Convertible) error { - return fmt.Errorf("v1beta1 is the highest known version, got: %T", source) + return } diff --git a/control-plane/pkg/apis/sources/v1/doc.go b/control-plane/pkg/apis/sources/v1/doc.go new file mode 100644 index 0000000000..877e4324e6 --- /dev/null +++ b/control-plane/pkg/apis/sources/v1/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package v1 contains API Schema definitions for the sources v1 API group +// +k8s:openapi-gen=true +// +k8s:deepcopy-gen=package,register +// +k8s:defaulter-gen=TypeMeta +// +groupName=sources.knative.dev +package v1 diff --git a/control-plane/pkg/apis/sources/v1/fuzzer.go b/control-plane/pkg/apis/sources/v1/fuzzer.go new file mode 100644 index 0000000000..1bbd15e2f5 --- /dev/null +++ b/control-plane/pkg/apis/sources/v1/fuzzer.go @@ -0,0 +1,45 @@ +/* +Copyright 2020 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + fuzz "github.com/google/gofuzz" + "k8s.io/apimachinery/pkg/api/apitesting/fuzzer" + "k8s.io/apimachinery/pkg/runtime/serializer" + pkgfuzzer "knative.dev/pkg/apis/testing/fuzzer" +) + +// FuzzerFuncs includes fuzzing funcs for sources.knative.dev v1 types +// +// For other examples see +// https://github.com/kubernetes/apimachinery/blob/master/pkg/apis/meta/fuzzer/fuzzer.go +var FuzzerFuncs = fuzzer.MergeFuzzerFuncs( + func(codecs serializer.CodecFactory) []interface{} { + return []interface{}{ + func(s *KafkaSourceStatus, c fuzz.Continue) { + c.FuzzNoCustom(s) // fuzz the status object + + // Clear the random fuzzed condition + s.Status.SetConditions(nil) + + // Fuzz the known conditions except their type value + s.InitializeConditions() + pkgfuzzer.FuzzConditions(&s.Status, c) + }, + } + }, +) diff --git a/control-plane/pkg/apis/sources/v1/implements_test.go b/control-plane/pkg/apis/sources/v1/implements_test.go new file mode 100644 index 0000000000..dc70fef4ef --- /dev/null +++ b/control-plane/pkg/apis/sources/v1/implements_test.go @@ -0,0 +1,39 @@ +/* + * Copyright 2020 The Knative Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package v1 + +import ( + "testing" + + "knative.dev/pkg/apis/duck" + duckv1 "knative.dev/pkg/apis/duck/v1" +) + +func TestTypesImplements(t *testing.T) { + testCases := []struct { + instance interface{} + iface duck.Implementable + }{ + {instance: &KafkaSource{}, iface: &duckv1.Conditions{}}, + {instance: &KafkaSource{}, iface: &duckv1.Source{}}, + } + for _, tc := range testCases { + if err := duck.VerifyType(tc.instance, tc.iface); err != nil { + t.Error(err) + } + } +} diff --git a/control-plane/pkg/apis/sources/v1/kafka_conversion.go b/control-plane/pkg/apis/sources/v1/kafka_conversion.go new file mode 100644 index 0000000000..f2b1ee24ae --- /dev/null +++ b/control-plane/pkg/apis/sources/v1/kafka_conversion.go @@ -0,0 +1,34 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "context" + "fmt" + + "knative.dev/pkg/apis" +) + +// ConvertTo implements apis.Convertible +func (source *KafkaSource) ConvertTo(ctx context.Context, sink apis.Convertible) error { + return fmt.Errorf("v1 is the highest known version, got: %T", sink) +} + +// ConvertFrom implements apis.Convertible +func (sink *KafkaSource) ConvertFrom(ctx context.Context, source apis.Convertible) error { + return fmt.Errorf("v1 is the highest known version, got: %T", source) +} diff --git a/control-plane/pkg/apis/sources/v1/kafka_defaults.go b/control-plane/pkg/apis/sources/v1/kafka_defaults.go new file mode 100644 index 0000000000..51cd5feb07 --- /dev/null +++ b/control-plane/pkg/apis/sources/v1/kafka_defaults.go @@ -0,0 +1,80 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "context" + "strconv" + + "github.com/google/uuid" + "k8s.io/utils/pointer" + + "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/sources/config" + "knative.dev/pkg/apis" +) + +const ( + uuidPrefix = "knative-kafka-source-" + + classAnnotation = "autoscaling.knative.dev/class" + minScaleAnnotation = "autoscaling.knative.dev/minScale" + maxScaleAnnotation = "autoscaling.knative.dev/maxScale" + pollingIntervalAnnotation = "keda.autoscaling.knative.dev/pollingInterval" + cooldownPeriodAnnotation = "keda.autoscaling.knative.dev/cooldownPeriod" + kafkaLagThresholdAnnotation = "keda.autoscaling.knative.dev/kafkaLagThreshold" +) + +// SetDefaults ensures KafkaSource reflects the default values. +func (k *KafkaSource) SetDefaults(ctx context.Context) { + ctx = apis.WithinParent(ctx, k.ObjectMeta) + + if k.Spec.ConsumerGroup == "" { + k.Spec.ConsumerGroup = uuidPrefix + uuid.New().String() + } + + if k.Spec.Consumers == nil { + k.Spec.Consumers = pointer.Int32(1) + } + + if k.Spec.InitialOffset == "" { + k.Spec.InitialOffset = OffsetLatest + } + + if k.Spec.Ordering == nil { + deliveryOrdering := Ordered + k.Spec.Ordering = &deliveryOrdering + } + + kafkaConfig := config.FromContextOrDefaults(ctx) + kafkaDefaults := kafkaConfig.KafkaSourceDefaults + if kafkaDefaults.AutoscalingClass == config.KedaAutoscalingClass { + if k.Annotations == nil { + k.Annotations = map[string]string{} + } + k.Annotations[classAnnotation] = kafkaDefaults.AutoscalingClass + + // Set all annotations regardless of defaults + k.Annotations[minScaleAnnotation] = strconv.FormatInt(kafkaDefaults.MinScale, 10) + k.Annotations[maxScaleAnnotation] = strconv.FormatInt(kafkaDefaults.MaxScale, 10) + k.Annotations[pollingIntervalAnnotation] = strconv.FormatInt(kafkaDefaults.PollingInterval, 10) + k.Annotations[cooldownPeriodAnnotation] = strconv.FormatInt(kafkaDefaults.CooldownPeriod, 10) + k.Annotations[kafkaLagThresholdAnnotation] = 
strconv.FormatInt(kafkaDefaults.KafkaLagThreshold, 10) + } + + k.Spec.Sink.SetDefaults(ctx) + k.Spec.Delivery.SetDefaults(ctx) +} diff --git a/control-plane/pkg/apis/sources/v1/kafka_lifecycle.go b/control-plane/pkg/apis/sources/v1/kafka_lifecycle.go new file mode 100644 index 0000000000..5af5ac27fb --- /dev/null +++ b/control-plane/pkg/apis/sources/v1/kafka_lifecycle.go @@ -0,0 +1,186 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "sync" + + appsv1 "k8s.io/api/apps/v1" + "knative.dev/eventing/pkg/apis/duck" + "knative.dev/pkg/apis" + duckv1 "knative.dev/pkg/apis/duck/v1" +) + +const ( + // KafkaConditionReady has status True when the KafkaSource is ready to send events. + KafkaConditionReady = apis.ConditionReady + + // KafkaConditionSinkProvided has status True when the KafkaSource has been configured with a sink target. + KafkaConditionSinkProvided apis.ConditionType = "SinkProvided" + + // KafkaConditionDeployed has status True when the KafkaSource has had it's receive adapter deployment created. + KafkaConditionDeployed apis.ConditionType = "Deployed" + + // KafkaConditionKeyType is True when the KafkaSource has been configured with valid key type for + // the key deserializer. + KafkaConditionKeyType apis.ConditionType = "KeyTypeCorrect" + + // KafkaConditionConnectionEstablished has status True when the Kafka configuration to use by the source + // succeeded in establishing a connection to Kafka. 
+ KafkaConditionConnectionEstablished apis.ConditionType = "ConnectionEstablished" + + // KafkaConditionInitialOffsetsCommitted is True when the KafkaSource has committed the + // initial offset of all claims + KafkaConditionInitialOffsetsCommitted apis.ConditionType = "InitialOffsetsCommitted" + + // KafkaConditionOIDCIdentityCreated has status True when the KafkaSource has created an OIDC identity. + KafkaConditionOIDCIdentityCreated apis.ConditionType = "OIDCIdentityCreated" +) + +var ( + KafkaSourceCondSet = apis.NewLivingConditionSet( + KafkaConditionSinkProvided, + KafkaConditionDeployed, + KafkaConditionConnectionEstablished, + KafkaConditionInitialOffsetsCommitted, + KafkaConditionOIDCIdentityCreated, + ) + + kafkaCondSetLock = sync.RWMutex{} +) + +// RegisterAlternateKafkaConditionSet register an alternate apis.ConditionSet. +func RegisterAlternateKafkaConditionSet(conditionSet apis.ConditionSet) { + kafkaCondSetLock.Lock() + defer kafkaCondSetLock.Unlock() + + KafkaSourceCondSet = conditionSet +} + +// GetConditionSet retrieves the condition set for this resource. Implements the KRShaped interface. +func (*KafkaSource) GetConditionSet() apis.ConditionSet { + return KafkaSourceCondSet +} + +func (s *KafkaSourceStatus) GetCondition(t apis.ConditionType) *apis.Condition { + return KafkaSourceCondSet.Manage(s).GetCondition(t) +} + +// IsReady returns true if the resource is ready overall. +func (s *KafkaSourceStatus) IsReady() bool { + return KafkaSourceCondSet.Manage(s).IsHappy() +} + +// InitializeConditions sets relevant unset conditions to Unknown state. +func (s *KafkaSourceStatus) InitializeConditions() { + KafkaSourceCondSet.Manage(s).InitializeConditions() +} + +// MarkSink sets the condition that the source has a sink configured. 
+func (s *KafkaSourceStatus) MarkSink(addr *duckv1.Addressable) { + if addr.URL != nil && !addr.URL.IsEmpty() { + s.SinkURI = addr.URL + s.SinkCACerts = addr.CACerts + s.SinkAudience = addr.Audience + KafkaSourceCondSet.Manage(s).MarkTrue(KafkaConditionSinkProvided) + } else { + KafkaSourceCondSet.Manage(s).MarkUnknown(KafkaConditionSinkProvided, "SinkEmpty", "Sink has resolved to empty.%s", "") + } +} + +// MarkNoSink sets the condition that the source does not have a sink configured. +func (s *KafkaSourceStatus) MarkNoSink(reason, messageFormat string, messageA ...interface{}) { + KafkaSourceCondSet.Manage(s).MarkFalse(KafkaConditionSinkProvided, reason, messageFormat, messageA...) +} + +func DeploymentIsAvailable(d *appsv1.DeploymentStatus, def bool) bool { + // Check if the Deployment is available. + for _, cond := range d.Conditions { + if cond.Type == appsv1.DeploymentAvailable { + return cond.Status == "True" + } + } + return def +} + +// MarkDeployed sets the condition that the source has been deployed. +func (s *KafkaSourceStatus) MarkDeployed(d *appsv1.Deployment) { + if duck.DeploymentIsAvailable(&d.Status, false) { + KafkaSourceCondSet.Manage(s).MarkTrue(KafkaConditionDeployed) + + // Propagate the number of consumers + s.Consumers = d.Status.Replicas + } else { + // I don't know how to propagate the status well, so just give the name of the Deployment + // for now. + KafkaSourceCondSet.Manage(s).MarkFalse(KafkaConditionDeployed, "DeploymentUnavailable", "The Deployment '%s' is unavailable.", d.Name) + } +} + +// MarkDeploying sets the condition that the source is deploying. +func (s *KafkaSourceStatus) MarkDeploying(reason, messageFormat string, messageA ...interface{}) { + KafkaSourceCondSet.Manage(s).MarkUnknown(KafkaConditionDeployed, reason, messageFormat, messageA...) +} + +// MarkNotDeployed sets the condition that the source has not been deployed. 
+func (s *KafkaSourceStatus) MarkNotDeployed(reason, messageFormat string, messageA ...interface{}) { + KafkaSourceCondSet.Manage(s).MarkFalse(KafkaConditionDeployed, reason, messageFormat, messageA...) +} + +func (s *KafkaSourceStatus) MarkKeyTypeCorrect() { + KafkaSourceCondSet.Manage(s).MarkTrue(KafkaConditionKeyType) +} + +func (s *KafkaSourceStatus) MarkKeyTypeIncorrect(reason, messageFormat string, messageA ...interface{}) { + KafkaSourceCondSet.Manage(s).MarkFalse(KafkaConditionKeyType, reason, messageFormat, messageA...) +} + +func (cs *KafkaSourceStatus) MarkConnectionEstablished() { + KafkaSourceCondSet.Manage(cs).MarkTrue(KafkaConditionConnectionEstablished) +} + +func (cs *KafkaSourceStatus) MarkConnectionNotEstablished(reason, messageFormat string, messageA ...interface{}) { + KafkaSourceCondSet.Manage(cs).MarkFalse(KafkaConditionConnectionEstablished, reason, messageFormat, messageA...) +} + +func (s *KafkaSourceStatus) MarkInitialOffsetCommitted() { + KafkaSourceCondSet.Manage(s).MarkTrue(KafkaConditionInitialOffsetsCommitted) +} + +func (s *KafkaSourceStatus) MarkInitialOffsetNotCommitted(reason, messageFormat string, messageA ...interface{}) { + KafkaSourceCondSet.Manage(s).MarkFalse(KafkaConditionInitialOffsetsCommitted, reason, messageFormat, messageA...) +} + +func (s *KafkaSourceStatus) MarkOIDCIdentityCreatedSucceeded() { + KafkaSourceCondSet.Manage(s).MarkTrue(KafkaConditionOIDCIdentityCreated) +} + +func (s *KafkaSourceStatus) MarkOIDCIdentityCreatedSucceededWithReason(reason, messageFormat string, messageA ...interface{}) { + KafkaSourceCondSet.Manage(s).MarkTrueWithReason(KafkaConditionOIDCIdentityCreated, reason, messageFormat, messageA...) +} + +func (s *KafkaSourceStatus) MarkOIDCIdentityCreatedFailed(reason, messageFormat string, messageA ...interface{}) { + KafkaSourceCondSet.Manage(s).MarkFalse(KafkaConditionOIDCIdentityCreated, reason, messageFormat, messageA...) 
+} + +func (s *KafkaSourceStatus) MarkOIDCIdentityCreatedUnknown(reason, messageFormat string, messageA ...interface{}) { + KafkaSourceCondSet.Manage(s).MarkUnknown(KafkaConditionOIDCIdentityCreated, reason, messageFormat, messageA...) +} + +func (s *KafkaSourceStatus) UpdateConsumerGroupStatus(status string) { + s.Claims = status +} diff --git a/control-plane/pkg/apis/sources/v1/kafka_lifecycle_mt.go b/control-plane/pkg/apis/sources/v1/kafka_lifecycle_mt.go new file mode 100644 index 0000000000..9e8c1f49c1 --- /dev/null +++ b/control-plane/pkg/apis/sources/v1/kafka_lifecycle_mt.go @@ -0,0 +1,39 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "knative.dev/pkg/apis" +) + +const ( + + // KafkaConditionScheduled is True when all KafkaSource consumers has been scheduled + KafkaConditionScheduled apis.ConditionType = "Scheduled" +) + +var ( + KafkaMTSourceCondSet = apis.NewLivingConditionSet(KafkaConditionSinkProvided, KafkaConditionScheduled, KafkaConditionInitialOffsetsCommitted, KafkaConditionConnectionEstablished) +) + +func (s *KafkaSourceStatus) MarkScheduled() { + KafkaSourceCondSet.Manage(s).MarkTrue(KafkaConditionScheduled) +} + +func (s *KafkaSourceStatus) MarkNotScheduled(reason, messageFormat string, messageA ...interface{}) { + KafkaSourceCondSet.Manage(s).MarkFalse(KafkaConditionScheduled, reason, messageFormat, messageA...) 
+} diff --git a/control-plane/pkg/apis/sources/v1/kafka_scheduling.go b/control-plane/pkg/apis/sources/v1/kafka_scheduling.go new file mode 100644 index 0000000000..3acced761d --- /dev/null +++ b/control-plane/pkg/apis/sources/v1/kafka_scheduling.go @@ -0,0 +1,52 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/types" + "knative.dev/eventing/pkg/apis/duck/v1alpha1" +) + +func (k *KafkaSource) GetKey() types.NamespacedName { + return types.NamespacedName{ + Namespace: k.Namespace, + Name: k.Name, + } +} + +func (k *KafkaSource) GetVReplicas() int32 { + if k.Spec.Consumers == nil { + return 1 + } + if k.Status.MaxAllowedVReplicas != nil { + if *k.Spec.Consumers > *k.Status.MaxAllowedVReplicas { + return *k.Status.MaxAllowedVReplicas + } + } + return *k.Spec.Consumers +} + +func (k *KafkaSource) GetPlacements() []v1alpha1.Placement { + if k.Status.Placeable.Placements == nil { + return nil + } + return k.Status.Placeable.Placements +} + +func (k *KafkaSource) GetResourceVersion() string { + return k.ObjectMeta.ResourceVersion +} diff --git a/control-plane/pkg/apis/sources/v1/kafka_types.go b/control-plane/pkg/apis/sources/v1/kafka_types.go new file mode 100644 index 0000000000..a137d81de0 --- /dev/null +++ b/control-plane/pkg/apis/sources/v1/kafka_types.go @@ -0,0 +1,169 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not 
use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "fmt" + + "knative.dev/eventing/pkg/apis/duck/v1alpha1" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + eventingduckv1 "knative.dev/eventing/pkg/apis/duck/v1" + "knative.dev/pkg/apis" + duckv1 "knative.dev/pkg/apis/duck/v1" + "knative.dev/pkg/kmeta" + "knative.dev/pkg/webhook/resourcesemantics" + + bindingsv1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/bindings/v1" +) + +// KafkaSource is the Schema for the kafkasources API. +// +genclient +// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale +// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale +// +genclient:method=ApplyScale,verb=apply,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale +// +genreconciler +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:openapi-gen=true +type KafkaSource struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec KafkaSourceSpec `json:"spec,omitempty"` + Status KafkaSourceStatus `json:"status,omitempty"` +} + +// Check that KafkaSource can be validated and can be defaulted. 
+var _ runtime.Object = (*KafkaSource)(nil) +var _ resourcesemantics.GenericCRD = (*KafkaSource)(nil) +var _ kmeta.OwnerRefable = (*KafkaSource)(nil) +var _ apis.Defaultable = (*KafkaSource)(nil) +var _ apis.Validatable = (*KafkaSource)(nil) +var _ duckv1.KRShaped = (*KafkaSource)(nil) + +// KafkaSourceSpec defines the desired state of the KafkaSource. +type KafkaSourceSpec struct { + // Number of desired consumers running in the consumer group. Defaults to 1. + // + // This is a pointer to distinguish between explicit + // zero and not specified. + // +optional + Consumers *int32 `json:"consumers,omitempty"` + + bindingsv1.KafkaAuthSpec `json:",inline"` + + // Topic topics to consume messages from + // +required + Topics []string `json:"topics"` + + // ConsumerGroupID is the consumer group ID. + // +optional + ConsumerGroup string `json:"consumerGroup,omitempty"` + + // InitialOffset is the Initial Offset for the consumer group. + // should be earliest or latest + // +optional + InitialOffset Offset `json:"initialOffset,omitempty"` + + // Delivery contains the delivery spec for this source + // +optional + Delivery *eventingduckv1.DeliverySpec `json:"delivery,omitempty"` + + // Ordering is the type of the consumer verticle. + // Should be ordered or unordered. + // By default, it is ordered. + // +optional + Ordering *DeliveryOrdering `json:"ordering,omitempty"` + + // inherits duck/v1 SourceSpec, which currently provides: + // * Sink - a reference to an object that will resolve to a domain name or + // a URI directly to use as the sink. + // * CloudEventOverrides - defines overrides to control the output format + // and modifications of the event sent to the sink. + duckv1.SourceSpec `json:",inline"` +} + +type DeliveryOrdering string +type Offset string + +const ( + // KafkaEventType is the Kafka CloudEvent type. 
+ KafkaEventType = "dev.knative.kafka.event" + + KafkaKeyTypeLabel = "kafkasources.sources.knative.dev/key-type" + + // OffsetEarliest denotes the earliest offset in the kafka partition + OffsetEarliest Offset = "earliest" + + // OffsetLatest denotes the latest offset in the kafka partition + OffsetLatest Offset = "latest" +) + +var KafkaKeyTypeAllowed = []string{"string", "int", "float", "byte-array"} + +// KafkaEventSource returns the Kafka CloudEvent source. +func KafkaEventSource(namespace, kafkaSourceName, topic string) string { + return fmt.Sprintf("/apis/v1/namespaces/%s/kafkasources/%s#%s", namespace, kafkaSourceName, topic) +} + +// KafkaSourceStatus defines the observed state of KafkaSource. +type KafkaSourceStatus struct { + // inherits duck/v1 SourceStatus, which currently provides: + // * ObservedGeneration - the 'Generation' of the Service that was last + // processed by the controller. + // * Conditions - the latest available observations of a resource's current + // state. + // * SinkURI - the current active sink URI that has been configured for the + // Source. + duckv1.SourceStatus `json:",inline"` + + // Total number of consumers actually running in the consumer group. + // +optional + Consumers int32 `json:"consumers,omitempty"` + + // Use for labelSelectorPath when scaling Kafka source + // +optional + Selector string `json:"selector,omitempty"` + + // Claims consumed by this KafkaSource instance + // +optional + Claims string `json:"claims,omitempty"` + + // Implement Placeable. + // +optional + v1alpha1.Placeable `json:",inline"` +} + +func (*KafkaSource) GetGroupVersionKind() schema.GroupVersionKind { + return SchemeGroupVersion.WithKind("KafkaSource") +} + +// GetStatus retrieves the duck status for this resource. Implements the KRShaped interface. 
+func (k *KafkaSource) GetStatus() *duckv1.Status { + return &k.Status.Status +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// KafkaSourceList contains a list of KafkaSources. +type KafkaSourceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []KafkaSource `json:"items"` +} diff --git a/control-plane/pkg/apis/sources/v1/kafka_validation.go b/control-plane/pkg/apis/sources/v1/kafka_validation.go new file mode 100644 index 0000000000..29640a68e2 --- /dev/null +++ b/control-plane/pkg/apis/sources/v1/kafka_validation.go @@ -0,0 +1,88 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "context" + + "knative.dev/pkg/apis" + "knative.dev/pkg/kmp" +) + +// Validate ensures KafkaSource is properly configured. 
+func (ks *KafkaSource) Validate(ctx context.Context) *apis.FieldError { + errs := ks.Spec.Validate(ctx).ViaField("spec") + if apis.IsInUpdate(ctx) { + original := apis.GetBaseline(ctx).(*KafkaSource) + errs = errs.Also(ks.CheckImmutableFields(ctx, original)) + } + return errs +} + +func (kss *KafkaSourceSpec) Validate(ctx context.Context) *apis.FieldError { + var errs *apis.FieldError + + // Validate source spec + errs = errs.Also(kss.SourceSpec.Validate(ctx)) + + // Check for mandatory fields + if len(kss.Topics) <= 0 { + errs = errs.Also(apis.ErrMissingField("topics")) + } + if len(kss.BootstrapServers) <= 0 { + errs = errs.Also(apis.ErrMissingField("bootstrapServers")) + } + switch kss.InitialOffset { + case OffsetEarliest, OffsetLatest: + default: + errs = errs.Also(apis.ErrInvalidValue(kss.InitialOffset, "initialOffset")) + } + if kss.Ordering != nil { + switch *kss.Ordering { + case Unordered, Ordered: + default: + errs = errs.Also(apis.ErrInvalidValue(*kss.Ordering, "ordering")) + } + } + + return errs +} + +func (ks *KafkaSource) CheckImmutableFields(ctx context.Context, original *KafkaSource) *apis.FieldError { + if original == nil { + return nil + } + diff, err := kmp.ShortDiff(original.Spec.ConsumerGroup, ks.Spec.ConsumerGroup) + + if err != nil { + return &apis.FieldError{ + Message: "Failed to diff KafkaSource", + Paths: []string{"spec"}, + Details: err.Error(), + } + } + + if diff != "" { + return &apis.FieldError{ + Message: "Immutable fields changed (-old +new)", + Paths: []string{"spec"}, + Details: diff, + } + } + + return nil +} diff --git a/control-plane/pkg/apis/sources/v1/kafka_validation_test.go b/control-plane/pkg/apis/sources/v1/kafka_validation_test.go new file mode 100644 index 0000000000..0e873da955 --- /dev/null +++ b/control-plane/pkg/apis/sources/v1/kafka_validation_test.go @@ -0,0 +1,166 @@ +/* + * Copyright 2021 The Knative Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file 
except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1 + +import ( + "context" + "testing" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "knative.dev/pkg/apis" + duckv1 "knative.dev/pkg/apis/duck/v1" + + bindingsv1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/bindings/v1" +) + +func TestKafka_Validate(t *testing.T) { + validOrdering := Ordered + badOrdering := DeliveryOrdering("badOrder") + badInitialOffset := Offset("badbOffset") + + tests := []struct { + name string + ks *KafkaSource + ctx context.Context + want *apis.FieldError + }{ + { + name: "no bootstrap servers", + ks: &KafkaSource{ + Spec: KafkaSourceSpec{ + Topics: []string{"test-topic"}, + KafkaAuthSpec: bindingsv1.KafkaAuthSpec{}, + ConsumerGroup: "ks-group", + SourceSpec: duckv1.SourceSpec{ + Sink: NewSourceSinkReference(), + }, + }, + }, + ctx: context.Background(), + want: apis.ErrMissingField("spec.bootstrapServers"), + }, + { + name: "no topics", + ks: &KafkaSource{ + Spec: KafkaSourceSpec{ + KafkaAuthSpec: bindingsv1.KafkaAuthSpec{ + BootstrapServers: []string{"kafka:9092"}, + }, + ConsumerGroup: "ks-group", + SourceSpec: duckv1.SourceSpec{ + Sink: NewSourceSinkReference(), + }, + }, + }, + ctx: context.Background(), + want: apis.ErrMissingField("spec.topics"), + }, + { + name: "invalid ordering", + ks: &KafkaSource{ + Spec: KafkaSourceSpec{ + Topics: []string{"test-topic"}, + KafkaAuthSpec: bindingsv1.KafkaAuthSpec{ + BootstrapServers: []string{"kafka:9092"}, + }, + Ordering: &badOrdering, + ConsumerGroup: "ks-group", + 
SourceSpec: duckv1.SourceSpec{ + Sink: NewSourceSinkReference(), + }, + }, + }, + ctx: context.Background(), + want: apis.ErrInvalidValue(badOrdering, "spec.ordering"), + }, + { + name: "valid ordering", + ks: &KafkaSource{ + Spec: KafkaSourceSpec{ + Topics: []string{"test-topic"}, + KafkaAuthSpec: bindingsv1.KafkaAuthSpec{ + BootstrapServers: []string{"kafka:9092"}, + }, + Ordering: &validOrdering, + ConsumerGroup: "ks-group", + SourceSpec: duckv1.SourceSpec{ + Sink: NewSourceSinkReference(), + }, + }, + }, + ctx: context.Background(), + want: nil, + }, + { + name: "invalid initialOffset", + ks: &KafkaSource{ + Spec: KafkaSourceSpec{ + Topics: []string{"test-topic"}, + KafkaAuthSpec: bindingsv1.KafkaAuthSpec{ + BootstrapServers: []string{"kafka:9092"}, + }, + InitialOffset: badInitialOffset, + ConsumerGroup: "ks-group", + SourceSpec: duckv1.SourceSpec{ + Sink: NewSourceSinkReference(), + }, + }, + }, + ctx: context.Background(), + want: apis.ErrInvalidValue(badInitialOffset, "spec.initialOffset"), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tt.ks.SetDefaults(tt.ctx) + if got := tt.ks.Validate(tt.ctx); got.Error() != tt.want.Error() { + t.Errorf("Validate() = %v, want %v", got, tt.want) + } + }) + } +} + +func NewService(mutations ...func(*corev1.Service)) *corev1.Service { + s := &corev1.Service{ + TypeMeta: metav1.TypeMeta{ + Kind: "Service", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-service", + Namespace: "test-service-name", + }, + } + for _, mut := range mutations { + mut(s) + } + return s +} + +func NewSourceSinkReference() duckv1.Destination { + s := NewService() + return duckv1.Destination{ + Ref: &duckv1.KReference{ + Kind: s.Kind, + Namespace: s.Namespace, + Name: s.Name, + APIVersion: s.APIVersion, + }, + } +} diff --git a/control-plane/pkg/apis/sources/v1/order.go b/control-plane/pkg/apis/sources/v1/order.go new file mode 100644 index 0000000000..f9b0c68233 --- /dev/null +++ 
b/control-plane/pkg/apis/sources/v1/order.go @@ -0,0 +1,55 @@ +/* + * Copyright 2021 The Knative Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package v1 + +import ( + "context" + "strings" + + "k8s.io/apimachinery/pkg/util/sets" + "knative.dev/pkg/apis" +) + +const ( + // Ordered is a per partition blocking consumer. + // It waits for a successful response of the sink before + // sending the next message in the partition. + Ordered DeliveryOrdering = "ordered" + // Unordered is non-blocking consumer that delivers + // events out of any particular order. + Unordered DeliveryOrdering = "unordered" +) + +var ( + deliveryOrders = sets.NewString( + string(Ordered), + string(Unordered), + ) + + deliveryOrdersString = strings.Join(deliveryOrders.List(), ",") +) + +func (d DeliveryOrdering) Validate(context.Context) *apis.FieldError { + if !deliveryOrders.Has(string(d)) { + return apis.ErrInvalidValue(d, "", "expected one of: ["+deliveryOrdersString+"]") + } + return nil +} + +var ( + _ apis.Validatable = DeliveryOrdering("") +) diff --git a/control-plane/pkg/apis/sources/v1/register.go b/control-plane/pkg/apis/sources/v1/register.go new file mode 100644 index 0000000000..8734a08832 --- /dev/null +++ b/control-plane/pkg/apis/sources/v1/register.go @@ -0,0 +1,59 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// NOTE: Boilerplate only. Ignore this file. + +// Package v1 contains API Schema definitions for the sources v1 API group +// +k8s:openapi-gen=true +// +k8s:deepcopy-gen=package,register +// +k8s:defaulter-gen=TypeMeta +// +groupName=sources.knative.dev +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "knative.dev/eventing/pkg/apis/sources" +) + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: sources.GroupName, Version: "v1"} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to Scheme. 
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &KafkaSource{}, + &KafkaSourceList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/control-plane/pkg/apis/sources/v1/zz_generated.deepcopy.go b/control-plane/pkg/apis/sources/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..dd7160e540 --- /dev/null +++ b/control-plane/pkg/apis/sources/v1/zz_generated.deepcopy.go @@ -0,0 +1,144 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* + * Copyright 2021 The Knative Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" + duckv1 "knative.dev/eventing/pkg/apis/duck/v1" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaSource) DeepCopyInto(out *KafkaSource) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaSource. 
+func (in *KafkaSource) DeepCopy() *KafkaSource { + if in == nil { + return nil + } + out := new(KafkaSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KafkaSource) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaSourceList) DeepCopyInto(out *KafkaSourceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KafkaSource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaSourceList. +func (in *KafkaSourceList) DeepCopy() *KafkaSourceList { + if in == nil { + return nil + } + out := new(KafkaSourceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KafkaSourceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KafkaSourceSpec) DeepCopyInto(out *KafkaSourceSpec) { + *out = *in + if in.Consumers != nil { + in, out := &in.Consumers, &out.Consumers + *out = new(int32) + **out = **in + } + in.KafkaAuthSpec.DeepCopyInto(&out.KafkaAuthSpec) + if in.Topics != nil { + in, out := &in.Topics, &out.Topics + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Delivery != nil { + in, out := &in.Delivery, &out.Delivery + *out = new(duckv1.DeliverySpec) + (*in).DeepCopyInto(*out) + } + if in.Ordering != nil { + in, out := &in.Ordering, &out.Ordering + *out = new(DeliveryOrdering) + **out = **in + } + in.SourceSpec.DeepCopyInto(&out.SourceSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaSourceSpec. +func (in *KafkaSourceSpec) DeepCopy() *KafkaSourceSpec { + if in == nil { + return nil + } + out := new(KafkaSourceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaSourceStatus) DeepCopyInto(out *KafkaSourceStatus) { + *out = *in + in.SourceStatus.DeepCopyInto(&out.SourceStatus) + in.Placeable.DeepCopyInto(&out.Placeable) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaSourceStatus. 
+func (in *KafkaSourceStatus) DeepCopy() *KafkaSourceStatus { + if in == nil { + return nil + } + out := new(KafkaSourceStatus) + in.DeepCopyInto(out) + return out +} diff --git a/control-plane/pkg/apis/sources/v1/zz_generated.defaults.go b/control-plane/pkg/apis/sources/v1/zz_generated.defaults.go new file mode 100644 index 0000000000..a0005a51d2 --- /dev/null +++ b/control-plane/pkg/apis/sources/v1/zz_generated.defaults.go @@ -0,0 +1,33 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* + * Copyright 2021 The Knative Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Code generated by defaulter-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. 
+func RegisterDefaults(scheme *runtime.Scheme) error { + return nil +} diff --git a/control-plane/pkg/apis/sources/v1beta1/kafka_conversion.go b/control-plane/pkg/apis/sources/v1beta1/kafka_conversion.go index 7f01a80608..79a882e88c 100644 --- a/control-plane/pkg/apis/sources/v1beta1/kafka_conversion.go +++ b/control-plane/pkg/apis/sources/v1beta1/kafka_conversion.go @@ -21,14 +21,67 @@ import ( "fmt" "knative.dev/pkg/apis" + + bindingsv1beta1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/bindings/v1beta1" + v1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/sources/v1" ) // ConvertTo implements apis.Convertible -func (source *KafkaSource) ConvertTo(ctx context.Context, sink apis.Convertible) error { - return fmt.Errorf("v1beta1 is the highest known version, got: %T", sink) +func (source *KafkaSource) ConvertTo(ctx context.Context, to apis.Convertible) error { + switch sink := to.(type) { + case *v1.KafkaSource: + source.ObjectMeta.DeepCopyInto(&sink.ObjectMeta) + sink.Spec = v1.KafkaSourceSpec{ + Consumers: source.Spec.Consumers, + KafkaAuthSpec: *source.Spec.KafkaAuthSpec.ConvertToV1(ctx), + Topics: source.Spec.Topics, + ConsumerGroup: source.Spec.ConsumerGroup, + InitialOffset: v1.Offset(source.Spec.InitialOffset), + Delivery: source.Spec.Delivery, + Ordering: (*v1.DeliveryOrdering)(source.Spec.Ordering), + SourceSpec: source.Spec.SourceSpec, + } + sink.Status = v1.KafkaSourceStatus{ + SourceStatus: *source.Status.SourceStatus.DeepCopy(), + Consumers: source.Status.Consumers, + Selector: source.Status.Selector, + Claims: source.Status.Claims, + Placeable: source.Status.Placeable, + } + return nil + default: + return fmt.Errorf("unknown version, got: %T", sink) + } } // ConvertFrom implements apis.Convertible -func (sink *KafkaSource) ConvertFrom(ctx context.Context, source apis.Convertible) error { - return fmt.Errorf("v1beta1 is the highest known version, got: %T", source) +func (sink *KafkaSource) ConvertFrom(ctx context.Context, from 
apis.Convertible) error { + + switch source := from.(type) { + case *v1.KafkaSource: + source.ObjectMeta.DeepCopyInto(&sink.ObjectMeta) + authSpec := bindingsv1beta1.KafkaAuthSpec{} + authSpec.ConvertFromV1(&source.Spec.KafkaAuthSpec) + sink.Spec = KafkaSourceSpec{ + Consumers: source.Spec.Consumers, + KafkaAuthSpec: authSpec, + Topics: source.Spec.Topics, + ConsumerGroup: source.Spec.ConsumerGroup, + InitialOffset: Offset(source.Spec.InitialOffset), + Delivery: source.Spec.Delivery, + Ordering: (*DeliveryOrdering)(source.Spec.Ordering), + SourceSpec: source.Spec.SourceSpec, + } + sink.Status = KafkaSourceStatus{ + SourceStatus: source.Status.SourceStatus, + Consumers: source.Status.Consumers, + Selector: source.Status.Selector, + Claims: source.Status.Claims, + Placeable: source.Status.Placeable, + } + + return nil + default: + return fmt.Errorf("unknown version, got: %T", source) + } } diff --git a/control-plane/pkg/apis/sources/v1beta1/kafka_defaults.go b/control-plane/pkg/apis/sources/v1beta1/kafka_defaults.go index cd6d02caff..fcd75b2683 100644 --- a/control-plane/pkg/apis/sources/v1beta1/kafka_defaults.go +++ b/control-plane/pkg/apis/sources/v1beta1/kafka_defaults.go @@ -21,10 +21,11 @@ import ( "strconv" "github.com/google/uuid" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" - "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/sources/config" "knative.dev/pkg/apis" + + "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/sources/config" ) const ( @@ -47,7 +48,7 @@ func (k *KafkaSource) SetDefaults(ctx context.Context) { } if k.Spec.Consumers == nil { - k.Spec.Consumers = pointer.Int32(1) + k.Spec.Consumers = ptr.To(int32(1)) } if k.Spec.InitialOffset == "" { diff --git a/control-plane/pkg/client/clientset/versioned/clientset.go b/control-plane/pkg/client/clientset/versioned/clientset.go index b67f8a6b73..2ce9b59834 100644 --- a/control-plane/pkg/client/clientset/versioned/clientset.go +++ b/control-plane/pkg/client/clientset/versioned/clientset.go 
@@ -25,32 +25,43 @@ import ( discovery "k8s.io/client-go/discovery" rest "k8s.io/client-go/rest" flowcontrol "k8s.io/client-go/util/flowcontrol" + bindingsv1 "knative.dev/eventing-kafka-broker/control-plane/pkg/client/clientset/versioned/typed/bindings/v1" bindingsv1beta1 "knative.dev/eventing-kafka-broker/control-plane/pkg/client/clientset/versioned/typed/bindings/v1beta1" eventingv1alpha1 "knative.dev/eventing-kafka-broker/control-plane/pkg/client/clientset/versioned/typed/eventing/v1alpha1" internalv1alpha1 "knative.dev/eventing-kafka-broker/control-plane/pkg/client/clientset/versioned/typed/internalskafkaeventing/v1alpha1" messagingv1beta1 "knative.dev/eventing-kafka-broker/control-plane/pkg/client/clientset/versioned/typed/messaging/v1beta1" + sourcesv1 "knative.dev/eventing-kafka-broker/control-plane/pkg/client/clientset/versioned/typed/sources/v1" sourcesv1beta1 "knative.dev/eventing-kafka-broker/control-plane/pkg/client/clientset/versioned/typed/sources/v1beta1" ) type Interface interface { Discovery() discovery.DiscoveryInterface + BindingsV1() bindingsv1.BindingsV1Interface BindingsV1beta1() bindingsv1beta1.BindingsV1beta1Interface EventingV1alpha1() eventingv1alpha1.EventingV1alpha1Interface InternalV1alpha1() internalv1alpha1.InternalV1alpha1Interface MessagingV1beta1() messagingv1beta1.MessagingV1beta1Interface + SourcesV1() sourcesv1.SourcesV1Interface SourcesV1beta1() sourcesv1beta1.SourcesV1beta1Interface } // Clientset contains the clients for groups. 
type Clientset struct { *discovery.DiscoveryClient + bindingsV1 *bindingsv1.BindingsV1Client bindingsV1beta1 *bindingsv1beta1.BindingsV1beta1Client eventingV1alpha1 *eventingv1alpha1.EventingV1alpha1Client internalV1alpha1 *internalv1alpha1.InternalV1alpha1Client messagingV1beta1 *messagingv1beta1.MessagingV1beta1Client + sourcesV1 *sourcesv1.SourcesV1Client sourcesV1beta1 *sourcesv1beta1.SourcesV1beta1Client } +// BindingsV1 retrieves the BindingsV1Client +func (c *Clientset) BindingsV1() bindingsv1.BindingsV1Interface { + return c.bindingsV1 +} + // BindingsV1beta1 retrieves the BindingsV1beta1Client func (c *Clientset) BindingsV1beta1() bindingsv1beta1.BindingsV1beta1Interface { return c.bindingsV1beta1 @@ -71,6 +82,11 @@ func (c *Clientset) MessagingV1beta1() messagingv1beta1.MessagingV1beta1Interfac return c.messagingV1beta1 } +// SourcesV1 retrieves the SourcesV1Client +func (c *Clientset) SourcesV1() sourcesv1.SourcesV1Interface { + return c.sourcesV1 +} + // SourcesV1beta1 retrieves the SourcesV1beta1Client func (c *Clientset) SourcesV1beta1() sourcesv1beta1.SourcesV1beta1Interface { return c.sourcesV1beta1 @@ -120,6 +136,10 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, var cs Clientset var err error + cs.bindingsV1, err = bindingsv1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } cs.bindingsV1beta1, err = bindingsv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err @@ -136,6 +156,10 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, if err != nil { return nil, err } + cs.sourcesV1, err = sourcesv1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } cs.sourcesV1beta1, err = sourcesv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err @@ -161,10 +185,12 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { // New creates 
a new Clientset for the given RESTClient. func New(c rest.Interface) *Clientset { var cs Clientset + cs.bindingsV1 = bindingsv1.New(c) cs.bindingsV1beta1 = bindingsv1beta1.New(c) cs.eventingV1alpha1 = eventingv1alpha1.New(c) cs.internalV1alpha1 = internalv1alpha1.New(c) cs.messagingV1beta1 = messagingv1beta1.New(c) + cs.sourcesV1 = sourcesv1.New(c) cs.sourcesV1beta1 = sourcesv1beta1.New(c) cs.DiscoveryClient = discovery.NewDiscoveryClient(c) diff --git a/control-plane/pkg/client/clientset/versioned/fake/clientset_generated.go b/control-plane/pkg/client/clientset/versioned/fake/clientset_generated.go index 9ea1ba8a13..a756c4f938 100644 --- a/control-plane/pkg/client/clientset/versioned/fake/clientset_generated.go +++ b/control-plane/pkg/client/clientset/versioned/fake/clientset_generated.go @@ -25,6 +25,8 @@ import ( fakediscovery "k8s.io/client-go/discovery/fake" "k8s.io/client-go/testing" clientset "knative.dev/eventing-kafka-broker/control-plane/pkg/client/clientset/versioned" + bindingsv1 "knative.dev/eventing-kafka-broker/control-plane/pkg/client/clientset/versioned/typed/bindings/v1" + fakebindingsv1 "knative.dev/eventing-kafka-broker/control-plane/pkg/client/clientset/versioned/typed/bindings/v1/fake" bindingsv1beta1 "knative.dev/eventing-kafka-broker/control-plane/pkg/client/clientset/versioned/typed/bindings/v1beta1" fakebindingsv1beta1 "knative.dev/eventing-kafka-broker/control-plane/pkg/client/clientset/versioned/typed/bindings/v1beta1/fake" eventingv1alpha1 "knative.dev/eventing-kafka-broker/control-plane/pkg/client/clientset/versioned/typed/eventing/v1alpha1" @@ -33,6 +35,8 @@ import ( fakeinternalv1alpha1 "knative.dev/eventing-kafka-broker/control-plane/pkg/client/clientset/versioned/typed/internalskafkaeventing/v1alpha1/fake" messagingv1beta1 "knative.dev/eventing-kafka-broker/control-plane/pkg/client/clientset/versioned/typed/messaging/v1beta1" fakemessagingv1beta1 
"knative.dev/eventing-kafka-broker/control-plane/pkg/client/clientset/versioned/typed/messaging/v1beta1/fake" + sourcesv1 "knative.dev/eventing-kafka-broker/control-plane/pkg/client/clientset/versioned/typed/sources/v1" + fakesourcesv1 "knative.dev/eventing-kafka-broker/control-plane/pkg/client/clientset/versioned/typed/sources/v1/fake" sourcesv1beta1 "knative.dev/eventing-kafka-broker/control-plane/pkg/client/clientset/versioned/typed/sources/v1beta1" fakesourcesv1beta1 "knative.dev/eventing-kafka-broker/control-plane/pkg/client/clientset/versioned/typed/sources/v1beta1/fake" ) @@ -87,6 +91,11 @@ var ( _ testing.FakeClient = &Clientset{} ) +// BindingsV1 retrieves the BindingsV1Client +func (c *Clientset) BindingsV1() bindingsv1.BindingsV1Interface { + return &fakebindingsv1.FakeBindingsV1{Fake: &c.Fake} +} + // BindingsV1beta1 retrieves the BindingsV1beta1Client func (c *Clientset) BindingsV1beta1() bindingsv1beta1.BindingsV1beta1Interface { return &fakebindingsv1beta1.FakeBindingsV1beta1{Fake: &c.Fake} @@ -107,6 +116,11 @@ func (c *Clientset) MessagingV1beta1() messagingv1beta1.MessagingV1beta1Interfac return &fakemessagingv1beta1.FakeMessagingV1beta1{Fake: &c.Fake} } +// SourcesV1 retrieves the SourcesV1Client +func (c *Clientset) SourcesV1() sourcesv1.SourcesV1Interface { + return &fakesourcesv1.FakeSourcesV1{Fake: &c.Fake} +} + // SourcesV1beta1 retrieves the SourcesV1beta1Client func (c *Clientset) SourcesV1beta1() sourcesv1beta1.SourcesV1beta1Interface { return &fakesourcesv1beta1.FakeSourcesV1beta1{Fake: &c.Fake} diff --git a/control-plane/pkg/client/clientset/versioned/fake/register.go b/control-plane/pkg/client/clientset/versioned/fake/register.go index 6c5909a0a9..34e19e4050 100644 --- a/control-plane/pkg/client/clientset/versioned/fake/register.go +++ b/control-plane/pkg/client/clientset/versioned/fake/register.go @@ -24,10 +24,12 @@ import ( schema "k8s.io/apimachinery/pkg/runtime/schema" serializer "k8s.io/apimachinery/pkg/runtime/serializer" 
utilruntime "k8s.io/apimachinery/pkg/util/runtime" + bindingsv1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/bindings/v1" bindingsv1beta1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/bindings/v1beta1" eventingv1alpha1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/eventing/v1alpha1" internalv1alpha1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/internalskafkaeventing/v1alpha1" messagingv1beta1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/messaging/v1beta1" + sourcesv1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/sources/v1" sourcesv1beta1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/sources/v1beta1" ) @@ -35,10 +37,12 @@ var scheme = runtime.NewScheme() var codecs = serializer.NewCodecFactory(scheme) var localSchemeBuilder = runtime.SchemeBuilder{ + bindingsv1.AddToScheme, bindingsv1beta1.AddToScheme, eventingv1alpha1.AddToScheme, internalv1alpha1.AddToScheme, messagingv1beta1.AddToScheme, + sourcesv1.AddToScheme, sourcesv1beta1.AddToScheme, } diff --git a/control-plane/pkg/client/clientset/versioned/scheme/register.go b/control-plane/pkg/client/clientset/versioned/scheme/register.go index cb15517a40..cff3156b9c 100644 --- a/control-plane/pkg/client/clientset/versioned/scheme/register.go +++ b/control-plane/pkg/client/clientset/versioned/scheme/register.go @@ -24,10 +24,12 @@ import ( schema "k8s.io/apimachinery/pkg/runtime/schema" serializer "k8s.io/apimachinery/pkg/runtime/serializer" utilruntime "k8s.io/apimachinery/pkg/util/runtime" + bindingsv1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/bindings/v1" bindingsv1beta1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/bindings/v1beta1" eventingv1alpha1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/eventing/v1alpha1" internalv1alpha1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/internalskafkaeventing/v1alpha1" messagingv1beta1 
"knative.dev/eventing-kafka-broker/control-plane/pkg/apis/messaging/v1beta1" + sourcesv1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/sources/v1" sourcesv1beta1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/sources/v1beta1" ) @@ -35,10 +37,12 @@ var Scheme = runtime.NewScheme() var Codecs = serializer.NewCodecFactory(Scheme) var ParameterCodec = runtime.NewParameterCodec(Scheme) var localSchemeBuilder = runtime.SchemeBuilder{ + bindingsv1.AddToScheme, bindingsv1beta1.AddToScheme, eventingv1alpha1.AddToScheme, internalv1alpha1.AddToScheme, messagingv1beta1.AddToScheme, + sourcesv1.AddToScheme, sourcesv1beta1.AddToScheme, } diff --git a/control-plane/pkg/client/clientset/versioned/typed/bindings/v1/bindings_client.go b/control-plane/pkg/client/clientset/versioned/typed/bindings/v1/bindings_client.go new file mode 100644 index 0000000000..5daa23cb1f --- /dev/null +++ b/control-plane/pkg/client/clientset/versioned/typed/bindings/v1/bindings_client.go @@ -0,0 +1,107 @@ +/* + * Copyright 2021 The Knative Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1 + +import ( + "net/http" + + rest "k8s.io/client-go/rest" + v1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/bindings/v1" + "knative.dev/eventing-kafka-broker/control-plane/pkg/client/clientset/versioned/scheme" +) + +type BindingsV1Interface interface { + RESTClient() rest.Interface + KafkaBindingsGetter +} + +// BindingsV1Client is used to interact with features provided by the bindings.knative.dev group. +type BindingsV1Client struct { + restClient rest.Interface +} + +func (c *BindingsV1Client) KafkaBindings(namespace string) KafkaBindingInterface { + return newKafkaBindings(c, namespace) +} + +// NewForConfig creates a new BindingsV1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*BindingsV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new BindingsV1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*BindingsV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &BindingsV1Client{client}, nil +} + +// NewForConfigOrDie creates a new BindingsV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *BindingsV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new BindingsV1Client for the given RESTClient. 
+func New(c rest.Interface) *BindingsV1Client { + return &BindingsV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *BindingsV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/control-plane/pkg/client/clientset/versioned/typed/bindings/v1/doc.go b/control-plane/pkg/client/clientset/versioned/typed/bindings/v1/doc.go new file mode 100644 index 0000000000..54167ac41c --- /dev/null +++ b/control-plane/pkg/client/clientset/versioned/typed/bindings/v1/doc.go @@ -0,0 +1,20 @@ +/* + * Copyright 2021 The Knative Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. 
+package v1 diff --git a/control-plane/pkg/client/clientset/versioned/typed/bindings/v1/fake/doc.go b/control-plane/pkg/client/clientset/versioned/typed/bindings/v1/fake/doc.go new file mode 100644 index 0000000000..7f850700e3 --- /dev/null +++ b/control-plane/pkg/client/clientset/versioned/typed/bindings/v1/fake/doc.go @@ -0,0 +1,20 @@ +/* + * Copyright 2021 The Knative Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/control-plane/pkg/client/clientset/versioned/typed/bindings/v1/fake/fake_bindings_client.go b/control-plane/pkg/client/clientset/versioned/typed/bindings/v1/fake/fake_bindings_client.go new file mode 100644 index 0000000000..c68f0fb6f2 --- /dev/null +++ b/control-plane/pkg/client/clientset/versioned/typed/bindings/v1/fake/fake_bindings_client.go @@ -0,0 +1,40 @@ +/* + * Copyright 2021 The Knative Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" + v1 "knative.dev/eventing-kafka-broker/control-plane/pkg/client/clientset/versioned/typed/bindings/v1" +) + +type FakeBindingsV1 struct { + *testing.Fake +} + +func (c *FakeBindingsV1) KafkaBindings(namespace string) v1.KafkaBindingInterface { + return &FakeKafkaBindings{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeBindingsV1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/control-plane/pkg/client/clientset/versioned/typed/bindings/v1/fake/fake_kafkabinding.go b/control-plane/pkg/client/clientset/versioned/typed/bindings/v1/fake/fake_kafkabinding.go new file mode 100644 index 0000000000..e5a92aa147 --- /dev/null +++ b/control-plane/pkg/client/clientset/versioned/typed/bindings/v1/fake/fake_kafkabinding.go @@ -0,0 +1,141 @@ +/* + * Copyright 2021 The Knative Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" + v1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/bindings/v1" +) + +// FakeKafkaBindings implements KafkaBindingInterface +type FakeKafkaBindings struct { + Fake *FakeBindingsV1 + ns string +} + +var kafkabindingsResource = v1.SchemeGroupVersion.WithResource("kafkabindings") + +var kafkabindingsKind = v1.SchemeGroupVersion.WithKind("KafkaBinding") + +// Get takes name of the kafkaBinding, and returns the corresponding kafkaBinding object, and an error if there is any. +func (c *FakeKafkaBindings) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.KafkaBinding, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(kafkabindingsResource, c.ns, name), &v1.KafkaBinding{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.KafkaBinding), err +} + +// List takes label and field selectors, and returns the list of KafkaBindings that match those selectors. +func (c *FakeKafkaBindings) List(ctx context.Context, opts metav1.ListOptions) (result *v1.KafkaBindingList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(kafkabindingsResource, kafkabindingsKind, c.ns, opts), &v1.KafkaBindingList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1.KafkaBindingList{ListMeta: obj.(*v1.KafkaBindingList).ListMeta} + for _, item := range obj.(*v1.KafkaBindingList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested kafkaBindings. 
+func (c *FakeKafkaBindings) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(kafkabindingsResource, c.ns, opts)) + +} + +// Create takes the representation of a kafkaBinding and creates it. Returns the server's representation of the kafkaBinding, and an error, if there is any. +func (c *FakeKafkaBindings) Create(ctx context.Context, kafkaBinding *v1.KafkaBinding, opts metav1.CreateOptions) (result *v1.KafkaBinding, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(kafkabindingsResource, c.ns, kafkaBinding), &v1.KafkaBinding{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.KafkaBinding), err +} + +// Update takes the representation of a kafkaBinding and updates it. Returns the server's representation of the kafkaBinding, and an error, if there is any. +func (c *FakeKafkaBindings) Update(ctx context.Context, kafkaBinding *v1.KafkaBinding, opts metav1.UpdateOptions) (result *v1.KafkaBinding, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(kafkabindingsResource, c.ns, kafkaBinding), &v1.KafkaBinding{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.KafkaBinding), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeKafkaBindings) UpdateStatus(ctx context.Context, kafkaBinding *v1.KafkaBinding, opts metav1.UpdateOptions) (*v1.KafkaBinding, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(kafkabindingsResource, "status", c.ns, kafkaBinding), &v1.KafkaBinding{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.KafkaBinding), err +} + +// Delete takes name of the kafkaBinding and deletes it. Returns an error if one occurs. +func (c *FakeKafkaBindings) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + _, err := c.Fake. 
+ Invokes(testing.NewDeleteActionWithOptions(kafkabindingsResource, c.ns, name, opts), &v1.KafkaBinding{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeKafkaBindings) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + action := testing.NewDeleteCollectionAction(kafkabindingsResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &v1.KafkaBindingList{}) + return err +} + +// Patch applies the patch and returns the patched kafkaBinding. +func (c *FakeKafkaBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.KafkaBinding, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(kafkabindingsResource, c.ns, name, pt, data, subresources...), &v1.KafkaBinding{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.KafkaBinding), err +} diff --git a/control-plane/pkg/client/clientset/versioned/typed/bindings/v1/generated_expansion.go b/control-plane/pkg/client/clientset/versioned/typed/bindings/v1/generated_expansion.go new file mode 100644 index 0000000000..c0b0fa99e7 --- /dev/null +++ b/control-plane/pkg/client/clientset/versioned/typed/bindings/v1/generated_expansion.go @@ -0,0 +1,21 @@ +/* + * Copyright 2021 The Knative Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1 + +type KafkaBindingExpansion interface{} diff --git a/control-plane/pkg/client/clientset/versioned/typed/bindings/v1/kafkabinding.go b/control-plane/pkg/client/clientset/versioned/typed/bindings/v1/kafkabinding.go new file mode 100644 index 0000000000..7204c155df --- /dev/null +++ b/control-plane/pkg/client/clientset/versioned/typed/bindings/v1/kafkabinding.go @@ -0,0 +1,195 @@ +/* + * Copyright 2021 The Knative Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/bindings/v1" + scheme "knative.dev/eventing-kafka-broker/control-plane/pkg/client/clientset/versioned/scheme" +) + +// KafkaBindingsGetter has a method to return a KafkaBindingInterface. +// A group's client should implement this interface. +type KafkaBindingsGetter interface { + KafkaBindings(namespace string) KafkaBindingInterface +} + +// KafkaBindingInterface has methods to work with KafkaBinding resources. 
+type KafkaBindingInterface interface { + Create(ctx context.Context, kafkaBinding *v1.KafkaBinding, opts metav1.CreateOptions) (*v1.KafkaBinding, error) + Update(ctx context.Context, kafkaBinding *v1.KafkaBinding, opts metav1.UpdateOptions) (*v1.KafkaBinding, error) + UpdateStatus(ctx context.Context, kafkaBinding *v1.KafkaBinding, opts metav1.UpdateOptions) (*v1.KafkaBinding, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.KafkaBinding, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.KafkaBindingList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.KafkaBinding, err error) + KafkaBindingExpansion +} + +// kafkaBindings implements KafkaBindingInterface +type kafkaBindings struct { + client rest.Interface + ns string +} + +// newKafkaBindings returns a KafkaBindings +func newKafkaBindings(c *BindingsV1Client, namespace string) *kafkaBindings { + return &kafkaBindings{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the kafkaBinding, and returns the corresponding kafkaBinding object, and an error if there is any. +func (c *kafkaBindings) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.KafkaBinding, err error) { + result = &v1.KafkaBinding{} + err = c.client.Get(). + Namespace(c.ns). + Resource("kafkabindings"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of KafkaBindings that match those selectors. 
+func (c *kafkaBindings) List(ctx context.Context, opts metav1.ListOptions) (result *v1.KafkaBindingList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.KafkaBindingList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("kafkabindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested kafkaBindings. +func (c *kafkaBindings) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("kafkabindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a kafkaBinding and creates it. Returns the server's representation of the kafkaBinding, and an error, if there is any. +func (c *kafkaBindings) Create(ctx context.Context, kafkaBinding *v1.KafkaBinding, opts metav1.CreateOptions) (result *v1.KafkaBinding, err error) { + result = &v1.KafkaBinding{} + err = c.client.Post(). + Namespace(c.ns). + Resource("kafkabindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(kafkaBinding). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a kafkaBinding and updates it. Returns the server's representation of the kafkaBinding, and an error, if there is any. +func (c *kafkaBindings) Update(ctx context.Context, kafkaBinding *v1.KafkaBinding, opts metav1.UpdateOptions) (result *v1.KafkaBinding, err error) { + result = &v1.KafkaBinding{} + err = c.client.Put(). + Namespace(c.ns). + Resource("kafkabindings"). + Name(kafkaBinding.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(kafkaBinding). + Do(ctx). 
+ Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *kafkaBindings) UpdateStatus(ctx context.Context, kafkaBinding *v1.KafkaBinding, opts metav1.UpdateOptions) (result *v1.KafkaBinding, err error) { + result = &v1.KafkaBinding{} + err = c.client.Put(). + Namespace(c.ns). + Resource("kafkabindings"). + Name(kafkaBinding.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(kafkaBinding). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the kafkaBinding and deletes it. Returns an error if one occurs. +func (c *kafkaBindings) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("kafkabindings"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *kafkaBindings) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("kafkabindings"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched kafkaBinding. +func (c *kafkaBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.KafkaBinding, err error) { + result = &v1.KafkaBinding{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("kafkabindings"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} diff --git a/control-plane/pkg/client/clientset/versioned/typed/sources/v1/doc.go b/control-plane/pkg/client/clientset/versioned/typed/sources/v1/doc.go new file mode 100644 index 0000000000..54167ac41c --- /dev/null +++ b/control-plane/pkg/client/clientset/versioned/typed/sources/v1/doc.go @@ -0,0 +1,20 @@ +/* + * Copyright 2021 The Knative Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1 diff --git a/control-plane/pkg/client/clientset/versioned/typed/sources/v1/fake/doc.go b/control-plane/pkg/client/clientset/versioned/typed/sources/v1/fake/doc.go new file mode 100644 index 0000000000..7f850700e3 --- /dev/null +++ b/control-plane/pkg/client/clientset/versioned/typed/sources/v1/fake/doc.go @@ -0,0 +1,20 @@ +/* + * Copyright 2021 The Knative Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/control-plane/pkg/client/clientset/versioned/typed/sources/v1/fake/fake_kafkasource.go b/control-plane/pkg/client/clientset/versioned/typed/sources/v1/fake/fake_kafkasource.go new file mode 100644 index 0000000000..0b86b35ae6 --- /dev/null +++ b/control-plane/pkg/client/clientset/versioned/typed/sources/v1/fake/fake_kafkasource.go @@ -0,0 +1,164 @@ +/* + * Copyright 2021 The Knative Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + "context" + + autoscalingv1 "k8s.io/api/autoscaling/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" + v1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/sources/v1" +) + +// FakeKafkaSources implements KafkaSourceInterface +type FakeKafkaSources struct { + Fake *FakeSourcesV1 + ns string +} + +var kafkasourcesResource = v1.SchemeGroupVersion.WithResource("kafkasources") + +var kafkasourcesKind = v1.SchemeGroupVersion.WithKind("KafkaSource") + +// Get takes name of the kafkaSource, and returns the corresponding kafkaSource object, and an error if there is any. +func (c *FakeKafkaSources) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.KafkaSource, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(kafkasourcesResource, c.ns, name), &v1.KafkaSource{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.KafkaSource), err +} + +// List takes label and field selectors, and returns the list of KafkaSources that match those selectors. +func (c *FakeKafkaSources) List(ctx context.Context, opts metav1.ListOptions) (result *v1.KafkaSourceList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(kafkasourcesResource, kafkasourcesKind, c.ns, opts), &v1.KafkaSourceList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1.KafkaSourceList{ListMeta: obj.(*v1.KafkaSourceList).ListMeta} + for _, item := range obj.(*v1.KafkaSourceList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested kafkaSources. 
+func (c *FakeKafkaSources) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(kafkasourcesResource, c.ns, opts)) + +} + +// Create takes the representation of a kafkaSource and creates it. Returns the server's representation of the kafkaSource, and an error, if there is any. +func (c *FakeKafkaSources) Create(ctx context.Context, kafkaSource *v1.KafkaSource, opts metav1.CreateOptions) (result *v1.KafkaSource, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(kafkasourcesResource, c.ns, kafkaSource), &v1.KafkaSource{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.KafkaSource), err +} + +// Update takes the representation of a kafkaSource and updates it. Returns the server's representation of the kafkaSource, and an error, if there is any. +func (c *FakeKafkaSources) Update(ctx context.Context, kafkaSource *v1.KafkaSource, opts metav1.UpdateOptions) (result *v1.KafkaSource, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(kafkasourcesResource, c.ns, kafkaSource), &v1.KafkaSource{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.KafkaSource), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeKafkaSources) UpdateStatus(ctx context.Context, kafkaSource *v1.KafkaSource, opts metav1.UpdateOptions) (*v1.KafkaSource, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(kafkasourcesResource, "status", c.ns, kafkaSource), &v1.KafkaSource{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.KafkaSource), err +} + +// Delete takes name of the kafkaSource and deletes it. Returns an error if one occurs. +func (c *FakeKafkaSources) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + _, err := c.Fake. 
+ Invokes(testing.NewDeleteActionWithOptions(kafkasourcesResource, c.ns, name, opts), &v1.KafkaSource{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeKafkaSources) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + action := testing.NewDeleteCollectionAction(kafkasourcesResource, c.ns, listOpts) + + _, err := c.Fake.Invokes(action, &v1.KafkaSourceList{}) + return err +} + +// Patch applies the patch and returns the patched kafkaSource. +func (c *FakeKafkaSources) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.KafkaSource, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(kafkasourcesResource, c.ns, name, pt, data, subresources...), &v1.KafkaSource{}) + + if obj == nil { + return nil, err + } + return obj.(*v1.KafkaSource), err +} + +// GetScale takes name of the kafkaSource, and returns the corresponding scale object, and an error if there is any. +func (c *FakeKafkaSources) GetScale(ctx context.Context, kafkaSourceName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetSubresourceAction(kafkasourcesResource, c.ns, "scale", kafkaSourceName), &autoscalingv1.Scale{}) + + if obj == nil { + return nil, err + } + return obj.(*autoscalingv1.Scale), err +} + +// UpdateScale takes the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. +func (c *FakeKafkaSources) UpdateScale(ctx context.Context, kafkaSourceName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewUpdateSubresourceAction(kafkasourcesResource, "scale", c.ns, scale), &autoscalingv1.Scale{}) + + if obj == nil { + return nil, err + } + return obj.(*autoscalingv1.Scale), err +} diff --git a/control-plane/pkg/client/clientset/versioned/typed/sources/v1/fake/fake_sources_client.go b/control-plane/pkg/client/clientset/versioned/typed/sources/v1/fake/fake_sources_client.go new file mode 100644 index 0000000000..ea7c9b4688 --- /dev/null +++ b/control-plane/pkg/client/clientset/versioned/typed/sources/v1/fake/fake_sources_client.go @@ -0,0 +1,40 @@ +/* + * Copyright 2021 The Knative Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" + v1 "knative.dev/eventing-kafka-broker/control-plane/pkg/client/clientset/versioned/typed/sources/v1" +) + +type FakeSourcesV1 struct { + *testing.Fake +} + +func (c *FakeSourcesV1) KafkaSources(namespace string) v1.KafkaSourceInterface { + return &FakeKafkaSources{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *FakeSourcesV1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/control-plane/pkg/client/clientset/versioned/typed/sources/v1/generated_expansion.go b/control-plane/pkg/client/clientset/versioned/typed/sources/v1/generated_expansion.go new file mode 100644 index 0000000000..50ad87092d --- /dev/null +++ b/control-plane/pkg/client/clientset/versioned/typed/sources/v1/generated_expansion.go @@ -0,0 +1,21 @@ +/* + * Copyright 2021 The Knative Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +type KafkaSourceExpansion interface{} diff --git a/control-plane/pkg/client/clientset/versioned/typed/sources/v1/kafkasource.go b/control-plane/pkg/client/clientset/versioned/typed/sources/v1/kafkasource.go new file mode 100644 index 0000000000..eeb8572132 --- /dev/null +++ b/control-plane/pkg/client/clientset/versioned/typed/sources/v1/kafkasource.go @@ -0,0 +1,228 @@ +/* + * Copyright 2021 The Knative Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + autoscalingv1 "k8s.io/api/autoscaling/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/sources/v1" + scheme "knative.dev/eventing-kafka-broker/control-plane/pkg/client/clientset/versioned/scheme" +) + +// KafkaSourcesGetter has a method to return a KafkaSourceInterface. +// A group's client should implement this interface. +type KafkaSourcesGetter interface { + KafkaSources(namespace string) KafkaSourceInterface +} + +// KafkaSourceInterface has methods to work with KafkaSource resources. 
+type KafkaSourceInterface interface { + Create(ctx context.Context, kafkaSource *v1.KafkaSource, opts metav1.CreateOptions) (*v1.KafkaSource, error) + Update(ctx context.Context, kafkaSource *v1.KafkaSource, opts metav1.UpdateOptions) (*v1.KafkaSource, error) + UpdateStatus(ctx context.Context, kafkaSource *v1.KafkaSource, opts metav1.UpdateOptions) (*v1.KafkaSource, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.KafkaSource, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.KafkaSourceList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.KafkaSource, err error) + GetScale(ctx context.Context, kafkaSourceName string, options metav1.GetOptions) (*autoscalingv1.Scale, error) + UpdateScale(ctx context.Context, kafkaSourceName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (*autoscalingv1.Scale, error) + + KafkaSourceExpansion +} + +// kafkaSources implements KafkaSourceInterface +type kafkaSources struct { + client rest.Interface + ns string +} + +// newKafkaSources returns a KafkaSources +func newKafkaSources(c *SourcesV1Client, namespace string) *kafkaSources { + return &kafkaSources{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the kafkaSource, and returns the corresponding kafkaSource object, and an error if there is any. +func (c *kafkaSources) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.KafkaSource, err error) { + result = &v1.KafkaSource{} + err = c.client.Get(). + Namespace(c.ns). + Resource("kafkasources"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). 
+ Into(result) + return +} + +// List takes label and field selectors, and returns the list of KafkaSources that match those selectors. +func (c *kafkaSources) List(ctx context.Context, opts metav1.ListOptions) (result *v1.KafkaSourceList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.KafkaSourceList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("kafkasources"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested kafkaSources. +func (c *kafkaSources) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("kafkasources"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a kafkaSource and creates it. Returns the server's representation of the kafkaSource, and an error, if there is any. +func (c *kafkaSources) Create(ctx context.Context, kafkaSource *v1.KafkaSource, opts metav1.CreateOptions) (result *v1.KafkaSource, err error) { + result = &v1.KafkaSource{} + err = c.client.Post(). + Namespace(c.ns). + Resource("kafkasources"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(kafkaSource). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a kafkaSource and updates it. Returns the server's representation of the kafkaSource, and an error, if there is any. +func (c *kafkaSources) Update(ctx context.Context, kafkaSource *v1.KafkaSource, opts metav1.UpdateOptions) (result *v1.KafkaSource, err error) { + result = &v1.KafkaSource{} + err = c.client.Put(). + Namespace(c.ns). + Resource("kafkasources"). 
+ Name(kafkaSource.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(kafkaSource). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *kafkaSources) UpdateStatus(ctx context.Context, kafkaSource *v1.KafkaSource, opts metav1.UpdateOptions) (result *v1.KafkaSource, err error) { + result = &v1.KafkaSource{} + err = c.client.Put(). + Namespace(c.ns). + Resource("kafkasources"). + Name(kafkaSource.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(kafkaSource). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the kafkaSource and deletes it. Returns an error if one occurs. +func (c *kafkaSources) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("kafkasources"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *kafkaSources) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("kafkasources"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched kafkaSource. +func (c *kafkaSources) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.KafkaSource, err error) { + result = &v1.KafkaSource{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("kafkasources"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). 
+ Do(ctx). + Into(result) + return +} + +// GetScale takes name of the kafkaSource, and returns the corresponding autoscalingv1.Scale object, and an error if there is any. +func (c *kafkaSources) GetScale(ctx context.Context, kafkaSourceName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { + result = &autoscalingv1.Scale{} + err = c.client.Get(). + Namespace(c.ns). + Resource("kafkasources"). + Name(kafkaSourceName). + SubResource("scale"). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. +func (c *kafkaSources) UpdateScale(ctx context.Context, kafkaSourceName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (result *autoscalingv1.Scale, err error) { + result = &autoscalingv1.Scale{} + err = c.client.Put(). + Namespace(c.ns). + Resource("kafkasources"). + Name(kafkaSourceName). + SubResource("scale"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(scale). + Do(ctx). + Into(result) + return +} diff --git a/control-plane/pkg/client/clientset/versioned/typed/sources/v1/sources_client.go b/control-plane/pkg/client/clientset/versioned/typed/sources/v1/sources_client.go new file mode 100644 index 0000000000..ea0ff42e4f --- /dev/null +++ b/control-plane/pkg/client/clientset/versioned/typed/sources/v1/sources_client.go @@ -0,0 +1,107 @@ +/* + * Copyright 2021 The Knative Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "net/http" + + rest "k8s.io/client-go/rest" + v1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/sources/v1" + "knative.dev/eventing-kafka-broker/control-plane/pkg/client/clientset/versioned/scheme" +) + +type SourcesV1Interface interface { + RESTClient() rest.Interface + KafkaSourcesGetter +} + +// SourcesV1Client is used to interact with features provided by the sources.knative.dev group. +type SourcesV1Client struct { + restClient rest.Interface +} + +func (c *SourcesV1Client) KafkaSources(namespace string) KafkaSourceInterface { + return newKafkaSources(c, namespace) +} + +// NewForConfig creates a new SourcesV1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*SourcesV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new SourcesV1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. 
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*SourcesV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &SourcesV1Client{client}, nil +} + +// NewForConfigOrDie creates a new SourcesV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *SourcesV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new SourcesV1Client for the given RESTClient. +func New(c rest.Interface) *SourcesV1Client { + return &SourcesV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *SourcesV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/control-plane/pkg/client/informers/externalversions/bindings/interface.go b/control-plane/pkg/client/informers/externalversions/bindings/interface.go index 5c2fef8944..686913b146 100644 --- a/control-plane/pkg/client/informers/externalversions/bindings/interface.go +++ b/control-plane/pkg/client/informers/externalversions/bindings/interface.go @@ -19,12 +19,15 @@ package bindings import ( + v1 "knative.dev/eventing-kafka-broker/control-plane/pkg/client/informers/externalversions/bindings/v1" v1beta1 "knative.dev/eventing-kafka-broker/control-plane/pkg/client/informers/externalversions/bindings/v1beta1" internalinterfaces "knative.dev/eventing-kafka-broker/control-plane/pkg/client/informers/externalversions/internalinterfaces" ) // Interface provides access to each of this group's versions. type Interface interface { + // V1 provides access to shared informers for resources in V1. + V1() v1.Interface // V1beta1 provides access to shared informers for resources in V1beta1. V1beta1() v1beta1.Interface } @@ -40,6 +43,11 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } +// V1 returns a new v1.Interface. +func (g *group) V1() v1.Interface { + return v1.New(g.factory, g.namespace, g.tweakListOptions) +} + // V1beta1 returns a new v1beta1.Interface. 
// Interface provides access to all the informers in this group version
// (bindings.knative.dev/v1).
type Interface interface {
	// KafkaBindings returns a KafkaBindingInformer.
	KafkaBindings() KafkaBindingInformer
}

// version carries the shared factory plus the namespace and list-option
// restrictions that every informer built from it inherits.
type version struct {
	factory          internalinterfaces.SharedInformerFactory
	namespace        string
	tweakListOptions internalinterfaces.TweakListOptionsFunc
}

// New returns a new Interface.
func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
}
// KafkaBindings returns a KafkaBindingInformer bound to this version's
// factory, namespace, and list-option tweaks.
func (v *version) KafkaBindings() KafkaBindingInformer {
	return &kafkaBindingInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
}

// KafkaBindingInformer provides access to a shared informer and lister for
// KafkaBindings.
type KafkaBindingInformer interface {
	Informer() cache.SharedIndexInformer
	Lister() v1.KafkaBindingLister
}

type kafkaBindingInformer struct {
	factory          internalinterfaces.SharedInformerFactory
	tweakListOptions internalinterfaces.TweakListOptionsFunc
	namespace        string
}

// NewKafkaBindingInformer constructs a new informer for KafkaBinding type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewKafkaBindingInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
	return NewFilteredKafkaBindingInformer(client, namespace, resyncPeriod, indexers, nil)
}

// NewFilteredKafkaBindingInformer constructs a new informer for KafkaBinding type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredKafkaBindingInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
	return cache.NewSharedIndexInformer(
		&cache.ListWatch{
			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
				// tweakListOptions lets callers narrow the watch (e.g. by label).
				if tweakListOptions != nil {
					tweakListOptions(&options)
				}
				return client.BindingsV1().KafkaBindings(namespace).List(context.TODO(), options)
			},
			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
				if tweakListOptions != nil {
					tweakListOptions(&options)
				}
				return client.BindingsV1().KafkaBindings(namespace).Watch(context.TODO(), options)
			},
		},
		&bindingsv1.KafkaBinding{},
		resyncPeriod,
		indexers,
	)
}

// defaultInformer builds the informer with the standard namespace index;
// used by the factory's InformerFor to share one instance per type.
func (f *kafkaBindingInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
	return NewFilteredKafkaBindingInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
}

func (f *kafkaBindingInformer) Informer() cache.SharedIndexInformer {
	return f.factory.InformerFor(&bindingsv1.KafkaBinding{}, f.defaultInformer)
}

func (f *kafkaBindingInformer) Lister() v1.KafkaBindingLister {
	return v1.NewKafkaBindingLister(f.Informer().GetIndexer())
}
"knative.dev/eventing-kafka-broker/control-plane/pkg/apis/eventing/v1alpha1" internalskafkaeventingv1alpha1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/internalskafkaeventing/v1alpha1" messagingv1beta1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/messaging/v1beta1" + sourcesv1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/sources/v1" sourcesv1beta1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/sources/v1beta1" ) @@ -56,7 +58,11 @@ func (f *genericInformer) Lister() cache.GenericLister { // TODO extend this to unknown resources with a client pool func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { switch resource { - // Group=bindings.knative.dev, Version=v1beta1 + // Group=bindings.knative.dev, Version=v1 + case v1.SchemeGroupVersion.WithResource("kafkabindings"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Bindings().V1().KafkaBindings().Informer()}, nil + + // Group=bindings.knative.dev, Version=v1beta1 case v1beta1.SchemeGroupVersion.WithResource("kafkabindings"): return &genericInformer{resource: resource.GroupResource(), informer: f.Bindings().V1beta1().KafkaBindings().Informer()}, nil @@ -74,6 +80,10 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case messagingv1beta1.SchemeGroupVersion.WithResource("kafkachannels"): return &genericInformer{resource: resource.GroupResource(), informer: f.Messaging().V1beta1().KafkaChannels().Informer()}, nil + // Group=sources.knative.dev, Version=v1 + case sourcesv1.SchemeGroupVersion.WithResource("kafkasources"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Sources().V1().KafkaSources().Informer()}, nil + // Group=sources.knative.dev, Version=v1beta1 case sourcesv1beta1.SchemeGroupVersion.WithResource("kafkasources"): return &genericInformer{resource: resource.GroupResource(), informer: 
// Interface provides access to each of this group's versions
// (sources.knative.dev).
type Interface interface {
	// V1 provides access to shared informers for resources in V1.
	V1() v1.Interface
	// V1beta1 provides access to shared informers for resources in V1beta1.
	V1beta1() v1beta1.Interface
}

// V1 returns a new v1.Interface.
func (g *group) V1() v1.Interface {
	return v1.New(g.factory, g.namespace, g.tweakListOptions)
}
// Interface provides access to all the informers in this group version
// (sources.knative.dev/v1).
type Interface interface {
	// KafkaSources returns a KafkaSourceInformer.
	KafkaSources() KafkaSourceInformer
}

// version carries the shared factory plus the namespace and list-option
// restrictions that every informer built from it inherits.
type version struct {
	factory          internalinterfaces.SharedInformerFactory
	namespace        string
	tweakListOptions internalinterfaces.TweakListOptionsFunc
}

// New returns a new Interface.
func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
	return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
}
// KafkaSources returns a KafkaSourceInformer bound to this version's
// factory, namespace, and list-option tweaks.
func (v *version) KafkaSources() KafkaSourceInformer {
	return &kafkaSourceInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
}

// KafkaSourceInformer provides access to a shared informer and lister for
// KafkaSources.
type KafkaSourceInformer interface {
	Informer() cache.SharedIndexInformer
	Lister() v1.KafkaSourceLister
}

type kafkaSourceInformer struct {
	factory          internalinterfaces.SharedInformerFactory
	tweakListOptions internalinterfaces.TweakListOptionsFunc
	namespace        string
}

// NewKafkaSourceInformer constructs a new informer for KafkaSource type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewKafkaSourceInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
	return NewFilteredKafkaSourceInformer(client, namespace, resyncPeriod, indexers, nil)
}

// NewFilteredKafkaSourceInformer constructs a new informer for KafkaSource type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredKafkaSourceInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
	return cache.NewSharedIndexInformer(
		&cache.ListWatch{
			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
				// tweakListOptions lets callers narrow the watch (e.g. by label).
				if tweakListOptions != nil {
					tweakListOptions(&options)
				}
				return client.SourcesV1().KafkaSources(namespace).List(context.TODO(), options)
			},
			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
				if tweakListOptions != nil {
					tweakListOptions(&options)
				}
				return client.SourcesV1().KafkaSources(namespace).Watch(context.TODO(), options)
			},
		},
		&sourcesv1.KafkaSource{},
		resyncPeriod,
		indexers,
	)
}

// defaultInformer builds the informer with the standard namespace index;
// used by the factory's InformerFor to share one instance per type.
func (f *kafkaSourceInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
	return NewFilteredKafkaSourceInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
}

func (f *kafkaSourceInformer) Informer() cache.SharedIndexInformer {
	return f.factory.InformerFor(&sourcesv1.KafkaSource{}, f.defaultInformer)
}

func (f *kafkaSourceInformer) Lister() v1.KafkaSourceLister {
	return v1.NewKafkaSourceLister(f.Informer().GetIndexer())
}
// Get re-exports the non-fake accessor so tests can retrieve the informer
// from context exactly as production code does.
var Get = kafkasource.Get

func init() {
	// Register the fake informer for injection-based tests.
	injection.Fake.RegisterInformer(withInformer)
}

// withInformer stashes the fake factory's KafkaSource informer in the
// context under the same Key the real injection informer uses.
func withInformer(ctx context.Context) (context.Context, controller.Informer) {
	f := fake.Get(ctx)
	inf := f.Sources().V1().KafkaSources()
	return context.WithValue(ctx, kafkasource.Key{}, inf), inf.Informer()
}
// Get re-exports the non-fake filtered accessor so tests can retrieve the
// selector-scoped informer from context exactly as production code does.
var Get = filtered.Get

func init() {
	// Register the fake filtered informers for injection-based tests.
	injection.Fake.RegisterFilteredInformers(withInformer)
}

// withInformer builds one fake KafkaSource informer per label selector found
// in the context (placed there by the filtered factory) and stores each under
// its selector-specific Key.
func withInformer(ctx context.Context) (context.Context, []controller.Informer) {
	untyped := ctx.Value(factoryfiltered.LabelKey{})
	if untyped == nil {
		// Panics by design: the filtered factory must have run first.
		logging.FromContext(ctx).Panic(
			"Unable to fetch labelkey from context.")
	}
	labelSelectors := untyped.([]string)
	infs := []controller.Informer{}
	for _, selector := range labelSelectors {
		f := factoryfiltered.Get(ctx, selector)
		inf := f.Sources().V1().KafkaSources()
		ctx = context.WithValue(ctx, filtered.Key{Selector: selector}, inf)
		infs = append(infs, inf.Informer())
	}
	return ctx, infs
}
func init() {
	// Register the filtered informers with the default injection chain.
	injection.Default.RegisterFilteredInformers(withInformer)
}

// Key is used for associating the Informer inside the context.Context.
// One informer is stored per label Selector.
type Key struct {
	Selector string
}

// withInformer builds one KafkaSource informer per label selector found in
// the context (placed there by the filtered factory) and stores each under
// its selector-specific Key.
func withInformer(ctx context.Context) (context.Context, []controller.Informer) {
	untyped := ctx.Value(filtered.LabelKey{})
	if untyped == nil {
		// Panics by design: the filtered factory must have run first.
		logging.FromContext(ctx).Panic(
			"Unable to fetch labelkey from context.")
	}
	labelSelectors := untyped.([]string)
	infs := []controller.Informer{}
	for _, selector := range labelSelectors {
		f := filtered.Get(ctx, selector)
		inf := f.Sources().V1().KafkaSources()
		ctx = context.WithValue(ctx, Key{Selector: selector}, inf)
		infs = append(infs, inf.Informer())
	}
	return ctx, infs
}

// Get extracts the typed informer from the context.
// Panics if no informer was registered for the given selector.
func Get(ctx context.Context, selector string) v1.KafkaSourceInformer {
	untyped := ctx.Value(Key{Selector: selector})
	if untyped == nil {
		logging.FromContext(ctx).Panicf(
			"Unable to fetch knative.dev/eventing-kafka-broker/control-plane/pkg/client/informers/externalversions/sources/v1.KafkaSourceInformer with selector %s from context.", selector)
	}
	return untyped.(v1.KafkaSourceInformer)
}
// Key is used for associating the Informer inside the context.Context.
type Key struct{}

// withInformer stores the shared KafkaSource informer in the context and
// returns it for startup by the injection framework.
func withInformer(ctx context.Context) (context.Context, controller.Informer) {
	f := factory.Get(ctx)
	inf := f.Sources().V1().KafkaSources()
	return context.WithValue(ctx, Key{}, inf), inf.Informer()
}

// Get extracts the typed informer from the context.
// Panics if the injection informer was never registered (i.e. Get is called
// outside an injection-managed context).
func Get(ctx context.Context) v1.KafkaSourceInformer {
	untyped := ctx.Value(Key{})
	if untyped == nil {
		logging.FromContext(ctx).Panic(
			"Unable to fetch knative.dev/eventing-kafka-broker/control-plane/pkg/client/informers/externalversions/sources/v1.KafkaSourceInformer from context.")
	}
	return untyped.(v1.KafkaSourceInformer)
}
const (
	// defaultControllerAgentName is the event-recorder component name unless
	// overridden via controller.Options.AgentName.
	defaultControllerAgentName = "kafkasource-controller"
	// defaultFinalizerName is the finalizer placed on KafkaSources unless
	// overridden via controller.Options.FinalizerName.
	defaultFinalizerName = "kafkasources.sources.knative.dev"
)

// NewImpl returns a controller.Impl that handles queuing and feeding work from
// the queue through an implementation of controller.Reconciler, delegating to
// the provided Interface and optional Finalizer methods. OptionsFn is used to return
// controller.ControllerOptions to be used by the internal reconciler.
func NewImpl(ctx context.Context, r Interface, optionsFns ...controller.OptionsFn) *controller.Impl {
	logger := logging.FromContext(ctx)

	// Check the options function input. It should be 0 or 1.
	if len(optionsFns) > 1 {
		logger.Fatal("Up to one options function is supported, found: ", len(optionsFns))
	}

	kafkasourceInformer := kafkasource.Get(ctx)

	lister := kafkasourceInformer.Lister()

	var promoteFilterFunc func(obj interface{}) bool
	var promoteFunc = func(bkt reconciler.Bucket) {}

	rec := &reconcilerImpl{
		LeaderAwareFuncs: reconciler.LeaderAwareFuncs{
			// PromoteFunc re-enqueues every (optionally filtered) KafkaSource
			// when this replica becomes leader for the bucket.
			PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error {

				// Signal promotion event
				promoteFunc(bkt)

				all, err := lister.List(labels.Everything())
				if err != nil {
					return err
				}
				for _, elt := range all {
					if promoteFilterFunc != nil {
						if ok := promoteFilterFunc(elt); !ok {
							continue
						}
					}
					enq(bkt, types.NamespacedName{
						Namespace: elt.GetNamespace(),
						Name:      elt.GetName(),
					})
				}
				return nil
			},
		},
		Client:        client.Get(ctx),
		Lister:        lister,
		reconciler:    r,
		finalizerName: defaultFinalizerName,
	}

	// Derive a stable work-queue/log name from the reconciler's type.
	ctrType := reflect.TypeOf(r).Elem()
	ctrTypeName := fmt.Sprintf("%s.%s", ctrType.PkgPath(), ctrType.Name())
	ctrTypeName = strings.ReplaceAll(ctrTypeName, "/", ".")

	logger = logger.With(
		zap.String(logkey.ControllerType, ctrTypeName),
		zap.String(logkey.Kind, "sources.knative.dev.KafkaSource"),
	)

	impl := controller.NewContext(ctx, rec, controller.ControllerOptions{WorkQueueName: ctrTypeName, Logger: logger})
	agentName := defaultControllerAgentName

	// Pass impl to the options. Save any optional results.
	for _, fn := range optionsFns {
		opts := fn(impl)
		if opts.ConfigStore != nil {
			rec.configStore = opts.ConfigStore
		}
		if opts.FinalizerName != "" {
			rec.finalizerName = opts.FinalizerName
		}
		if opts.AgentName != "" {
			agentName = opts.AgentName
		}
		if opts.SkipStatusUpdates {
			rec.skipStatusUpdates = true
		}
		if opts.DemoteFunc != nil {
			rec.DemoteFunc = opts.DemoteFunc
		}
		if opts.PromoteFilterFunc != nil {
			promoteFilterFunc = opts.PromoteFilterFunc
		}
		if opts.PromoteFunc != nil {
			promoteFunc = opts.PromoteFunc
		}
	}

	rec.Recorder = createRecorder(ctx, agentName)

	return impl
}

// createRecorder returns the context's event recorder, or builds a new
// broadcaster-backed recorder (logging + Kubernetes Events sink) whose
// watches are stopped when ctx is done.
func createRecorder(ctx context.Context, agentName string) record.EventRecorder {
	logger := logging.FromContext(ctx)

	recorder := controller.GetEventRecorder(ctx)
	if recorder == nil {
		// Create event broadcaster
		logger.Debug("Creating event broadcaster")
		eventBroadcaster := record.NewBroadcaster()
		watches := []watch.Interface{
			eventBroadcaster.StartLogging(logger.Named("event-broadcaster").Infof),
			eventBroadcaster.StartRecordingToSink(
				&v1.EventSinkImpl{Interface: kubeclient.Get(ctx).CoreV1().Events("")}),
		}
		recorder = eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: agentName})
		go func() {
			<-ctx.Done()
			for _, w := range watches {
				w.Stop()
			}
		}()
	}

	return recorder
}

func init() {
	// NOTE(review): the AddToScheme error is discarded — this matches
	// injection-gen output, but a registration failure would only surface
	// later as an unrecognized type; confirm against the generator template.
	versionedscheme.AddToScheme(scheme.Scheme)
}
// Interface defines the strongly typed interfaces to be implemented by a
// controller reconciling v1.KafkaSource.
type Interface interface {
	// ReconcileKind implements custom logic to reconcile v1.KafkaSource. Any changes
	// to the objects .Status or .Finalizers will be propagated to the stored
	// object. It is recommended that implementors do not call any update calls
	// for the Kind inside of ReconcileKind, it is the responsibility of the calling
	// controller to propagate those properties. The resource passed to ReconcileKind
	// will always have an empty deletion timestamp.
	ReconcileKind(ctx context.Context, o *v1.KafkaSource) reconciler.Event
}

// Finalizer defines the strongly typed interfaces to be implemented by a
// controller finalizing v1.KafkaSource.
type Finalizer interface {
	// FinalizeKind implements custom logic to finalize v1.KafkaSource. Any changes
	// to the objects .Status or .Finalizers will be ignored. Returning a nil or
	// Normal type reconciler.Event will allow the finalizer to be deleted on
	// the resource. The resource passed to FinalizeKind will always have a set
	// deletion timestamp.
	FinalizeKind(ctx context.Context, o *v1.KafkaSource) reconciler.Event
}

// ReadOnlyInterface defines the strongly typed interfaces to be implemented by a
// controller reconciling v1.KafkaSource if they want to process resources for which
// they are not the leader.
type ReadOnlyInterface interface {
	// ObserveKind implements logic to observe v1.KafkaSource.
	// This method should not write to the API.
	ObserveKind(ctx context.Context, o *v1.KafkaSource) reconciler.Event
}

// doReconcile is the common shape of ReconcileKind/FinalizeKind/ObserveKind,
// letting the impl dispatch to whichever applies.
type doReconcile func(ctx context.Context, o *v1.KafkaSource) reconciler.Event

// reconcilerImpl implements controller.Reconciler for v1.KafkaSource resources.
type reconcilerImpl struct {
	// LeaderAwareFuncs is inlined to help us implement reconciler.LeaderAware.
	reconciler.LeaderAwareFuncs

	// Client is used to write back status updates.
	Client versioned.Interface

	// Listers index properties about resources.
	Lister sourcesv1.KafkaSourceLister

	// Recorder is an event recorder for recording Event resources to the
	// Kubernetes API.
	Recorder record.EventRecorder

	// configStore allows for decorating a context with config maps.
	// +optional
	configStore reconciler.ConfigStore

	// reconciler is the implementation of the business logic of the resource.
	reconciler Interface

	// finalizerName is the name of the finalizer to reconcile.
	finalizerName string

	// skipStatusUpdates configures whether or not this reconciler automatically updates
	// the status of the reconciled resource.
	skipStatusUpdates bool
}

// Check that our Reconciler implements controller.Reconciler.
var _ controller.Reconciler = (*reconcilerImpl)(nil)

// Check that our generated Reconciler is always LeaderAware.
var _ reconciler.LeaderAware = (*reconcilerImpl)(nil)
+ enq(bkt, types.NamespacedName{ + Namespace: elt.GetNamespace(), + Name: elt.GetName(), + }) + } + return nil + }, + }, + Client: client, + Lister: lister, + Recorder: recorder, + reconciler: r, + finalizerName: defaultFinalizerName, + } + + for _, opts := range options { + if opts.ConfigStore != nil { + rec.configStore = opts.ConfigStore + } + if opts.FinalizerName != "" { + rec.finalizerName = opts.FinalizerName + } + if opts.SkipStatusUpdates { + rec.skipStatusUpdates = true + } + if opts.DemoteFunc != nil { + rec.DemoteFunc = opts.DemoteFunc + } + } + + return rec +} + +// Reconcile implements controller.Reconciler +func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error { + logger := logging.FromContext(ctx) + + // Initialize the reconciler state. This will convert the namespace/name + // string into a distinct namespace and name, determine if this instance of + // the reconciler is the leader, and any additional interfaces implemented + // by the reconciler. Returns an error is the resource key is invalid. + s, err := newState(key, r) + if err != nil { + logger.Error("Invalid resource key: ", key) + return nil + } + + // If we are not the leader, and we don't implement either ReadOnly + // observer interfaces, then take a fast-path out. + if s.isNotLeaderNorObserver() { + return controller.NewSkipKey(key) + } + + // If configStore is set, attach the frozen configuration to the context. + if r.configStore != nil { + ctx = r.configStore.ToContext(ctx) + } + + // Add the recorder to context. + ctx = controller.WithEventRecorder(ctx, r.Recorder) + + // Get the resource with this namespace/name. + + getter := r.Lister.KafkaSources(s.namespace) + + original, err := getter.Get(s.name) + + if errors.IsNotFound(err) { + // The resource may no longer exist, in which case we stop processing and call + // the ObserveDeletion handler if appropriate. 
+ logger.Debugf("Resource %q no longer exists", key) + if del, ok := r.reconciler.(reconciler.OnDeletionInterface); ok { + return del.ObserveDeletion(ctx, types.NamespacedName{ + Namespace: s.namespace, + Name: s.name, + }) + } + return nil + } else if err != nil { + return err + } + + // Don't modify the informers copy. + resource := original.DeepCopy() + + var reconcileEvent reconciler.Event + + name, do := s.reconcileMethodFor(resource) + // Append the target method to the logger. + logger = logger.With(zap.String("targetMethod", name)) + switch name { + case reconciler.DoReconcileKind: + // Set and update the finalizer on resource if r.reconciler + // implements Finalizer. + if resource, err = r.setFinalizerIfFinalizer(ctx, resource); err != nil { + return fmt.Errorf("failed to set finalizers: %w", err) + } + + if !r.skipStatusUpdates { + reconciler.PreProcessReconcile(ctx, resource) + } + + // Reconcile this copy of the resource and then write back any status + // updates regardless of whether the reconciliation errored out. + reconcileEvent = do(ctx, resource) + + if !r.skipStatusUpdates { + reconciler.PostProcessReconcile(ctx, resource, original) + } + + case reconciler.DoFinalizeKind: + // For finalizing reconcilers, if this resource being marked for deletion + // and reconciled cleanly (nil or normal event), remove the finalizer. + reconcileEvent = do(ctx, resource) + + if resource, err = r.clearFinalizer(ctx, resource, reconcileEvent); err != nil { + return fmt.Errorf("failed to clear finalizers: %w", err) + } + + case reconciler.DoObserveKind: + // Observe any changes to this resource, since we are not the leader. + reconcileEvent = do(ctx, resource) + + } + + // Synchronize the status. + switch { + case r.skipStatusUpdates: + // This reconciler implementation is configured to skip resource updates. + // This may mean this reconciler does not observe spec, but reconciles external changes. 
+ case equality.Semantic.DeepEqual(original.Status, resource.Status): + // If we didn't change anything then don't call updateStatus. + // This is important because the copy we loaded from the injectionInformer's + // cache may be stale and we don't want to overwrite a prior update + // to status with this stale state. + case !s.isLeader: + // High-availability reconcilers may have many replicas watching the resource, but only + // the elected leader is expected to write modifications. + logger.Warn("Saw status changes when we aren't the leader!") + default: + if err = r.updateStatus(ctx, logger, original, resource); err != nil { + logger.Warnw("Failed to update resource status", zap.Error(err)) + r.Recorder.Eventf(resource, corev1.EventTypeWarning, "UpdateFailed", + "Failed to update status for %q: %v", resource.Name, err) + return err + } + } + + // Report the reconciler event, if any. + if reconcileEvent != nil { + var event *reconciler.ReconcilerEvent + if reconciler.EventAs(reconcileEvent, &event) { + logger.Infow("Returned an event", zap.Any("event", reconcileEvent)) + r.Recorder.Event(resource, event.EventType, event.Reason, event.Error()) + + // the event was wrapped inside an error, consider the reconciliation as failed + if _, isEvent := reconcileEvent.(*reconciler.ReconcilerEvent); !isEvent { + return reconcileEvent + } + return nil + } + + if controller.IsSkipKey(reconcileEvent) { + // This is a wrapped error, don't emit an event. + } else if ok, _ := controller.IsRequeueKey(reconcileEvent); ok { + // This is a wrapped error, don't emit an event. 
+ } else { + logger.Errorw("Returned an error", zap.Error(reconcileEvent)) + r.Recorder.Event(resource, corev1.EventTypeWarning, "InternalError", reconcileEvent.Error()) + } + return reconcileEvent + } + + return nil +} + +func (r *reconcilerImpl) updateStatus(ctx context.Context, logger *zap.SugaredLogger, existing *v1.KafkaSource, desired *v1.KafkaSource) error { + existing = existing.DeepCopy() + return reconciler.RetryUpdateConflicts(func(attempts int) (err error) { + // The first iteration tries to use the injectionInformer's state, subsequent attempts fetch the latest state via API. + if attempts > 0 { + + getter := r.Client.SourcesV1().KafkaSources(desired.Namespace) + + existing, err = getter.Get(ctx, desired.Name, metav1.GetOptions{}) + if err != nil { + return err + } + } + + // If there's nothing to update, just return. + if equality.Semantic.DeepEqual(existing.Status, desired.Status) { + return nil + } + + if logger.Desugar().Core().Enabled(zapcore.DebugLevel) { + if diff, err := kmp.SafeDiff(existing.Status, desired.Status); err == nil && diff != "" { + logger.Debug("Updating status with: ", diff) + } + } + + existing.Status = desired.Status + + updater := r.Client.SourcesV1().KafkaSources(existing.Namespace) + + _, err = updater.UpdateStatus(ctx, existing, metav1.UpdateOptions{}) + return err + }) +} + +// updateFinalizersFiltered will update the Finalizers of the resource. +// TODO: this method could be generic and sync all finalizers. For now it only +// updates defaultFinalizerName or its override. +func (r *reconcilerImpl) updateFinalizersFiltered(ctx context.Context, resource *v1.KafkaSource, desiredFinalizers sets.Set[string]) (*v1.KafkaSource, error) { + // Don't modify the informers copy. + existing := resource.DeepCopy() + + var finalizers []string + + // If there's nothing to update, just return. + existingFinalizers := sets.New[string](existing.Finalizers...) 
+ + if desiredFinalizers.Has(r.finalizerName) { + if existingFinalizers.Has(r.finalizerName) { + // Nothing to do. + return resource, nil + } + // Add the finalizer. + finalizers = append(existing.Finalizers, r.finalizerName) + } else { + if !existingFinalizers.Has(r.finalizerName) { + // Nothing to do. + return resource, nil + } + // Remove the finalizer. + existingFinalizers.Delete(r.finalizerName) + finalizers = sets.List(existingFinalizers) + } + + mergePatch := map[string]interface{}{ + "metadata": map[string]interface{}{ + "finalizers": finalizers, + "resourceVersion": existing.ResourceVersion, + }, + } + + patch, err := json.Marshal(mergePatch) + if err != nil { + return resource, err + } + + patcher := r.Client.SourcesV1().KafkaSources(resource.Namespace) + + resourceName := resource.Name + updated, err := patcher.Patch(ctx, resourceName, types.MergePatchType, patch, metav1.PatchOptions{}) + if err != nil { + r.Recorder.Eventf(existing, corev1.EventTypeWarning, "FinalizerUpdateFailed", + "Failed to update finalizers for %q: %v", resourceName, err) + } else { + r.Recorder.Eventf(updated, corev1.EventTypeNormal, "FinalizerUpdate", + "Updated %q finalizers", resource.GetName()) + } + return updated, err +} + +func (r *reconcilerImpl) setFinalizerIfFinalizer(ctx context.Context, resource *v1.KafkaSource) (*v1.KafkaSource, error) { + if _, ok := r.reconciler.(Finalizer); !ok { + return resource, nil + } + + finalizers := sets.New[string](resource.Finalizers...) + + // If this resource is not being deleted, mark the finalizer. + if resource.GetDeletionTimestamp().IsZero() { + finalizers.Insert(r.finalizerName) + } + + // Synchronize the finalizers filtered by r.finalizerName. 
+ return r.updateFinalizersFiltered(ctx, resource, finalizers) +} + +func (r *reconcilerImpl) clearFinalizer(ctx context.Context, resource *v1.KafkaSource, reconcileEvent reconciler.Event) (*v1.KafkaSource, error) { + if _, ok := r.reconciler.(Finalizer); !ok { + return resource, nil + } + if resource.GetDeletionTimestamp().IsZero() { + return resource, nil + } + + finalizers := sets.New[string](resource.Finalizers...) + + if reconcileEvent != nil { + var event *reconciler.ReconcilerEvent + if reconciler.EventAs(reconcileEvent, &event) { + if event.EventType == corev1.EventTypeNormal { + finalizers.Delete(r.finalizerName) + } + } + } else { + finalizers.Delete(r.finalizerName) + } + + // Synchronize the finalizers filtered by r.finalizerName. + return r.updateFinalizersFiltered(ctx, resource, finalizers) +} diff --git a/control-plane/pkg/client/injection/reconciler/sources/v1/kafkasource/state.go b/control-plane/pkg/client/injection/reconciler/sources/v1/kafkasource/state.go new file mode 100644 index 0000000000..b1dcc46272 --- /dev/null +++ b/control-plane/pkg/client/injection/reconciler/sources/v1/kafkasource/state.go @@ -0,0 +1,97 @@ +/* + * Copyright 2021 The Knative Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Code generated by injection-gen. DO NOT EDIT. 
+ +package kafkasource + +import ( + fmt "fmt" + + types "k8s.io/apimachinery/pkg/types" + cache "k8s.io/client-go/tools/cache" + v1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/sources/v1" + reconciler "knative.dev/pkg/reconciler" +) + +// state is used to track the state of a reconciler in a single run. +type state struct { + // key is the original reconciliation key from the queue. + key string + // namespace is the namespace split from the reconciliation key. + namespace string + // name is the name split from the reconciliation key. + name string + // reconciler is the reconciler. + reconciler Interface + // roi is the read only interface cast of the reconciler. + roi ReadOnlyInterface + // isROI (Read Only Interface) the reconciler only observes reconciliation. + isROI bool + // isLeader the instance of the reconciler is the elected leader. + isLeader bool +} + +func newState(key string, r *reconcilerImpl) (*state, error) { + // Convert the namespace/name string into a distinct namespace and name. + namespace, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + return nil, fmt.Errorf("invalid resource key: %s", key) + } + + roi, isROI := r.reconciler.(ReadOnlyInterface) + + isLeader := r.IsLeaderFor(types.NamespacedName{ + Namespace: namespace, + Name: name, + }) + + return &state{ + key: key, + namespace: namespace, + name: name, + reconciler: r.reconciler, + roi: roi, + isROI: isROI, + isLeader: isLeader, + }, nil +} + +// isNotLeaderNorObserver checks to see if this reconciler with the current +// state is enabled to do any work or not. +// isNotLeaderNorObserver returns true when there is no work possible for the +// reconciler. +func (s *state) isNotLeaderNorObserver() bool { + if !s.isLeader && !s.isROI { + // If we are not the leader, and we don't implement the ReadOnly + // interface, then take a fast-path out. 
+ return true + } + return false +} + +func (s *state) reconcileMethodFor(o *v1.KafkaSource) (string, doReconcile) { + if o.GetDeletionTimestamp().IsZero() { + if s.isLeader { + return reconciler.DoReconcileKind, s.reconciler.ReconcileKind + } else if s.isROI { + return reconciler.DoObserveKind, s.roi.ObserveKind + } + } else if fin, ok := s.reconciler.(Finalizer); s.isLeader && ok { + return reconciler.DoFinalizeKind, fin.FinalizeKind + } + return "unknown", nil +} diff --git a/control-plane/pkg/client/listers/bindings/v1/expansion_generated.go b/control-plane/pkg/client/listers/bindings/v1/expansion_generated.go new file mode 100644 index 0000000000..5deb3e67d7 --- /dev/null +++ b/control-plane/pkg/client/listers/bindings/v1/expansion_generated.go @@ -0,0 +1,27 @@ +/* + * Copyright 2021 The Knative Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +// KafkaBindingListerExpansion allows custom methods to be added to +// KafkaBindingLister. +type KafkaBindingListerExpansion interface{} + +// KafkaBindingNamespaceListerExpansion allows custom methods to be added to +// KafkaBindingNamespaceLister. 
+type KafkaBindingNamespaceListerExpansion interface{} diff --git a/control-plane/pkg/client/listers/bindings/v1/kafkabinding.go b/control-plane/pkg/client/listers/bindings/v1/kafkabinding.go new file mode 100644 index 0000000000..8c9a81a05c --- /dev/null +++ b/control-plane/pkg/client/listers/bindings/v1/kafkabinding.go @@ -0,0 +1,99 @@ +/* + * Copyright 2021 The Knative Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" + v1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/bindings/v1" +) + +// KafkaBindingLister helps list KafkaBindings. +// All objects returned here must be treated as read-only. +type KafkaBindingLister interface { + // List lists all KafkaBindings in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.KafkaBinding, err error) + // KafkaBindings returns an object that can list and get KafkaBindings. + KafkaBindings(namespace string) KafkaBindingNamespaceLister + KafkaBindingListerExpansion +} + +// kafkaBindingLister implements the KafkaBindingLister interface. +type kafkaBindingLister struct { + indexer cache.Indexer +} + +// NewKafkaBindingLister returns a new KafkaBindingLister. 
+func NewKafkaBindingLister(indexer cache.Indexer) KafkaBindingLister { + return &kafkaBindingLister{indexer: indexer} +} + +// List lists all KafkaBindings in the indexer. +func (s *kafkaBindingLister) List(selector labels.Selector) (ret []*v1.KafkaBinding, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.KafkaBinding)) + }) + return ret, err +} + +// KafkaBindings returns an object that can list and get KafkaBindings. +func (s *kafkaBindingLister) KafkaBindings(namespace string) KafkaBindingNamespaceLister { + return kafkaBindingNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// KafkaBindingNamespaceLister helps list and get KafkaBindings. +// All objects returned here must be treated as read-only. +type KafkaBindingNamespaceLister interface { + // List lists all KafkaBindings in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.KafkaBinding, err error) + // Get retrieves the KafkaBinding from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.KafkaBinding, error) + KafkaBindingNamespaceListerExpansion +} + +// kafkaBindingNamespaceLister implements the KafkaBindingNamespaceLister +// interface. +type kafkaBindingNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all KafkaBindings in the indexer for a given namespace. +func (s kafkaBindingNamespaceLister) List(selector labels.Selector) (ret []*v1.KafkaBinding, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.KafkaBinding)) + }) + return ret, err +} + +// Get retrieves the KafkaBinding from the indexer for a given namespace and name. 
+func (s kafkaBindingNamespaceLister) Get(name string) (*v1.KafkaBinding, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("kafkabinding"), name) + } + return obj.(*v1.KafkaBinding), nil +} diff --git a/control-plane/pkg/client/listers/sources/v1/expansion_generated.go b/control-plane/pkg/client/listers/sources/v1/expansion_generated.go new file mode 100644 index 0000000000..e13d109760 --- /dev/null +++ b/control-plane/pkg/client/listers/sources/v1/expansion_generated.go @@ -0,0 +1,27 @@ +/* + * Copyright 2021 The Knative Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +// KafkaSourceListerExpansion allows custom methods to be added to +// KafkaSourceLister. +type KafkaSourceListerExpansion interface{} + +// KafkaSourceNamespaceListerExpansion allows custom methods to be added to +// KafkaSourceNamespaceLister. 
+type KafkaSourceNamespaceListerExpansion interface{} diff --git a/control-plane/pkg/client/listers/sources/v1/kafkasource.go b/control-plane/pkg/client/listers/sources/v1/kafkasource.go new file mode 100644 index 0000000000..c36f19e088 --- /dev/null +++ b/control-plane/pkg/client/listers/sources/v1/kafkasource.go @@ -0,0 +1,99 @@ +/* + * Copyright 2021 The Knative Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" + v1 "knative.dev/eventing-kafka-broker/control-plane/pkg/apis/sources/v1" +) + +// KafkaSourceLister helps list KafkaSources. +// All objects returned here must be treated as read-only. +type KafkaSourceLister interface { + // List lists all KafkaSources in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.KafkaSource, err error) + // KafkaSources returns an object that can list and get KafkaSources. + KafkaSources(namespace string) KafkaSourceNamespaceLister + KafkaSourceListerExpansion +} + +// kafkaSourceLister implements the KafkaSourceLister interface. +type kafkaSourceLister struct { + indexer cache.Indexer +} + +// NewKafkaSourceLister returns a new KafkaSourceLister. 
+func NewKafkaSourceLister(indexer cache.Indexer) KafkaSourceLister { + return &kafkaSourceLister{indexer: indexer} +} + +// List lists all KafkaSources in the indexer. +func (s *kafkaSourceLister) List(selector labels.Selector) (ret []*v1.KafkaSource, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.KafkaSource)) + }) + return ret, err +} + +// KafkaSources returns an object that can list and get KafkaSources. +func (s *kafkaSourceLister) KafkaSources(namespace string) KafkaSourceNamespaceLister { + return kafkaSourceNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// KafkaSourceNamespaceLister helps list and get KafkaSources. +// All objects returned here must be treated as read-only. +type KafkaSourceNamespaceLister interface { + // List lists all KafkaSources in the indexer for a given namespace. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1.KafkaSource, err error) + // Get retrieves the KafkaSource from the indexer for a given namespace and name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1.KafkaSource, error) + KafkaSourceNamespaceListerExpansion +} + +// kafkaSourceNamespaceLister implements the KafkaSourceNamespaceLister +// interface. +type kafkaSourceNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all KafkaSources in the indexer for a given namespace. +func (s kafkaSourceNamespaceLister) List(selector labels.Selector) (ret []*v1.KafkaSource, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.KafkaSource)) + }) + return ret, err +} + +// Get retrieves the KafkaSource from the indexer for a given namespace and name. 
+func (s kafkaSourceNamespaceLister) Get(name string) (*v1.KafkaSource, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("kafkasource"), name) + } + return obj.(*v1.KafkaSource), nil +} diff --git a/hack/update-codegen.sh b/hack/update-codegen.sh index 9e680fbb65..e3e1d0a518 100755 --- a/hack/update-codegen.sh +++ b/hack/update-codegen.sh @@ -56,7 +56,7 @@ group "Knative Codegen" # Knative Injection "${KNATIVE_CODEGEN_PKG}"/hack/generate-knative.sh "injection" \ knative.dev/eventing-kafka-broker/control-plane/pkg/client knative.dev/eventing-kafka-broker/control-plane/pkg/apis \ - "eventing:v1alpha1 messaging:v1beta1 sources:v1beta1 bindings:v1beta1 internalskafkaeventing:v1alpha1" \ + "eventing:v1alpha1 messaging:v1beta1 sources:v1 sources:v1beta1 bindings:v1beta1 internalskafkaeventing:v1alpha1" \ --go-header-file "${REPO_ROOT_DIR}"/hack/boilerplate/boilerplate.go.txt "${KNATIVE_CODEGEN_PKG}"/hack/generate-knative.sh "injection" \ diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/interface.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/interface.go new file mode 100644 index 0000000000..dcb16e7814 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/interface.go @@ -0,0 +1,54 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package apiextensions + +import ( + v1 "k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1" + v1beta1 "k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1beta1" + internalinterfaces "k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/internalinterfaces" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1 provides access to shared informers for resources in V1. + V1() v1.Interface + // V1beta1 provides access to shared informers for resources in V1beta1. + V1beta1() v1beta1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1 returns a new v1.Interface. +func (g *group) V1() v1.Interface { + return v1.New(g.factory, g.namespace, g.tweakListOptions) +} + +// V1beta1 returns a new v1beta1.Interface. 
+func (g *group) V1beta1() v1beta1.Interface { + return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1/customresourcedefinition.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1/customresourcedefinition.go new file mode 100644 index 0000000000..7d1b571112 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1/customresourcedefinition.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + time "time" + + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + clientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" + internalinterfaces "k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/internalinterfaces" + v1 "k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// CustomResourceDefinitionInformer provides access to a shared informer and lister for +// CustomResourceDefinitions. 
+type CustomResourceDefinitionInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.CustomResourceDefinitionLister +} + +type customResourceDefinitionInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewCustomResourceDefinitionInformer constructs a new informer for CustomResourceDefinition type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewCustomResourceDefinitionInformer(client clientset.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredCustomResourceDefinitionInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredCustomResourceDefinitionInformer constructs a new informer for CustomResourceDefinition type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredCustomResourceDefinitionInformer(client clientset.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ApiextensionsV1().CustomResourceDefinitions().List(context.TODO(), options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ApiextensionsV1().CustomResourceDefinitions().Watch(context.TODO(), options) + }, + }, + &apiextensionsv1.CustomResourceDefinition{}, + resyncPeriod, + indexers, + ) +} + +func (f *customResourceDefinitionInformer) defaultInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredCustomResourceDefinitionInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *customResourceDefinitionInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiextensionsv1.CustomResourceDefinition{}, f.defaultInformer) +} + +func (f *customResourceDefinitionInformer) Lister() v1.CustomResourceDefinitionLister { + return v1.NewCustomResourceDefinitionLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1/interface.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1/interface.go new file mode 100644 index 0000000000..d96e2099ae --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + internalinterfaces "k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // CustomResourceDefinitions returns a CustomResourceDefinitionInformer. + CustomResourceDefinitions() CustomResourceDefinitionInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// CustomResourceDefinitions returns a CustomResourceDefinitionInformer. 
+func (v *version) CustomResourceDefinitions() CustomResourceDefinitionInformer { + return &customResourceDefinitionInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1beta1/customresourcedefinition.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1beta1/customresourcedefinition.go new file mode 100644 index 0000000000..489c87ae90 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1beta1/customresourcedefinition.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. 
+ +package v1beta1 + +import ( + "context" + time "time" + + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + clientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" + internalinterfaces "k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/internalinterfaces" + v1beta1 "k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// CustomResourceDefinitionInformer provides access to a shared informer and lister for +// CustomResourceDefinitions. +type CustomResourceDefinitionInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.CustomResourceDefinitionLister +} + +type customResourceDefinitionInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewCustomResourceDefinitionInformer constructs a new informer for CustomResourceDefinition type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewCustomResourceDefinitionInformer(client clientset.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredCustomResourceDefinitionInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredCustomResourceDefinitionInformer constructs a new informer for CustomResourceDefinition type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredCustomResourceDefinitionInformer(client clientset.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ApiextensionsV1beta1().CustomResourceDefinitions().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ApiextensionsV1beta1().CustomResourceDefinitions().Watch(context.TODO(), options) + }, + }, + &apiextensionsv1beta1.CustomResourceDefinition{}, + resyncPeriod, + indexers, + ) +} + +func (f *customResourceDefinitionInformer) defaultInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredCustomResourceDefinitionInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *customResourceDefinitionInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apiextensionsv1beta1.CustomResourceDefinition{}, f.defaultInformer) +} + +func (f *customResourceDefinitionInformer) Lister() v1beta1.CustomResourceDefinitionLister { + return v1beta1.NewCustomResourceDefinitionLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1beta1/interface.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1beta1/interface.go new file mode 100644 index 0000000000..f78edbb593 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1beta1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1beta1 + +import ( + internalinterfaces "k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // CustomResourceDefinitions returns a CustomResourceDefinitionInformer. + CustomResourceDefinitions() CustomResourceDefinitionInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// CustomResourceDefinitions returns a CustomResourceDefinitionInformer. 
+func (v *version) CustomResourceDefinitions() CustomResourceDefinitionInformer { + return &customResourceDefinitionInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/factory.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/factory.go new file mode 100644 index 0000000000..90319bd215 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/factory.go @@ -0,0 +1,261 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package externalversions + +import ( + reflect "reflect" + sync "sync" + time "time" + + clientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" + apiextensions "k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions" + internalinterfaces "k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/internalinterfaces" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// SharedInformerOption defines the functional option type for SharedInformerFactory. 
+type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory + +type sharedInformerFactory struct { + client clientset.Interface + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc + lock sync.Mutex + defaultResync time.Duration + customResync map[reflect.Type]time.Duration + transform cache.TransformFunc + + informers map[reflect.Type]cache.SharedIndexInformer + // startedInformers is used for tracking which informers have been started. + // This allows Start() to be called multiple times safely. + startedInformers map[reflect.Type]bool + // wg tracks how many goroutines were started. + wg sync.WaitGroup + // shuttingDown is true when Shutdown has been called. It may still be running + // because it needs to wait for goroutines. + shuttingDown bool +} + +// WithCustomResyncConfig sets a custom resync period for the specified informer types. +func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + for k, v := range resyncConfig { + factory.customResync[reflect.TypeOf(k)] = v + } + return factory + } +} + +// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory. +func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.tweakListOptions = tweakListOptions + return factory + } +} + +// WithNamespace limits the SharedInformerFactory to the specified namespace. +func WithNamespace(namespace string) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.namespace = namespace + return factory + } +} + +// WithTransform sets a transform on all informers. 
+func WithTransform(transform cache.TransformFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.transform = transform + return factory + } +} + +// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. +func NewSharedInformerFactory(client clientset.Interface, defaultResync time.Duration) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync) +} + +// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory. +// Listers obtained via this SharedInformerFactory will be subject to the same filters +// as specified here. +// Deprecated: Please use NewSharedInformerFactoryWithOptions instead +func NewFilteredSharedInformerFactory(client clientset.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions)) +} + +// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options. 
+func NewSharedInformerFactoryWithOptions(client clientset.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory { + factory := &sharedInformerFactory{ + client: client, + namespace: v1.NamespaceAll, + defaultResync: defaultResync, + informers: make(map[reflect.Type]cache.SharedIndexInformer), + startedInformers: make(map[reflect.Type]bool), + customResync: make(map[reflect.Type]time.Duration), + } + + // Apply all options + for _, opt := range options { + factory = opt(factory) + } + + return factory +} + +func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) { + f.lock.Lock() + defer f.lock.Unlock() + + if f.shuttingDown { + return + } + + for informerType, informer := range f.informers { + if !f.startedInformers[informerType] { + f.wg.Add(1) + // We need a new variable in each loop iteration, + // otherwise the goroutine would use the loop variable + // and that keeps changing. + informer := informer + go func() { + defer f.wg.Done() + informer.Run(stopCh) + }() + f.startedInformers[informerType] = true + } + } +} + +func (f *sharedInformerFactory) Shutdown() { + f.lock.Lock() + f.shuttingDown = true + f.lock.Unlock() + + // Will return immediately if there is nothing to wait for. + f.wg.Wait() +} + +func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { + informers := func() map[reflect.Type]cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informers := map[reflect.Type]cache.SharedIndexInformer{} + for informerType, informer := range f.informers { + if f.startedInformers[informerType] { + informers[informerType] = informer + } + } + return informers + }() + + res := map[reflect.Type]bool{} + for informType, informer := range informers { + res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced) + } + return res +} + +// InformerFor returns the SharedIndexInformer for obj using an internal +// client. 
+func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informerType := reflect.TypeOf(obj) + informer, exists := f.informers[informerType] + if exists { + return informer + } + + resyncPeriod, exists := f.customResync[informerType] + if !exists { + resyncPeriod = f.defaultResync + } + + informer = newFunc(f.client, resyncPeriod) + informer.SetTransform(f.transform) + f.informers[informerType] = informer + + return informer +} + +// SharedInformerFactory provides shared informers for resources in all known +// API group versions. +// +// It is typically used like this: +// +// ctx, cancel := context.Background() +// defer cancel() +// factory := NewSharedInformerFactory(client, resyncPeriod) +// defer factory.WaitForStop() // Returns immediately if nothing was started. +// genericInformer := factory.ForResource(resource) +// typedInformer := factory.SomeAPIGroup().V1().SomeType() +// factory.Start(ctx.Done()) // Start processing these informers. +// synced := factory.WaitForCacheSync(ctx.Done()) +// for v, ok := range synced { +// if !ok { +// fmt.Fprintf(os.Stderr, "caches failed to sync: %v", v) +// return +// } +// } +// +// // Creating informers can also be created after Start, but then +// // Start must be called again: +// anotherGenericInformer := factory.ForResource(resource) +// factory.Start(ctx.Done()) +type SharedInformerFactory interface { + internalinterfaces.SharedInformerFactory + + // Start initializes all requested informers. They are handled in goroutines + // which run until the stop channel gets closed. + Start(stopCh <-chan struct{}) + + // Shutdown marks a factory as shutting down. At that point no new + // informers can be started anymore and Start will return without + // doing anything. + // + // In addition, Shutdown blocks until all goroutines have terminated. 
For that + // to happen, the close channel(s) that they were started with must be closed, + // either before Shutdown gets called or while it is waiting. + // + // Shutdown may be called multiple times, even concurrently. All such calls will + // block until all goroutines have terminated. + Shutdown() + + // WaitForCacheSync blocks until all started informers' caches were synced + // or the stop channel gets closed. + WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool + + // ForResource gives generic access to a shared informer of the matching type. + ForResource(resource schema.GroupVersionResource) (GenericInformer, error) + + // InformerFor returns the SharedIndexInformer for obj using an internal + // client. + InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer + + Apiextensions() apiextensions.Interface +} + +func (f *sharedInformerFactory) Apiextensions() apiextensions.Interface { + return apiextensions.New(f, f.namespace, f.tweakListOptions) +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/generic.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/generic.go new file mode 100644 index 0000000000..86f79cd417 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/generic.go @@ -0,0 +1,67 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package externalversions + +import ( + "fmt" + + v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + v1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// GenericInformer is type of SharedIndexInformer which will locate and delegate to other +// sharedInformers based on type +type GenericInformer interface { + Informer() cache.SharedIndexInformer + Lister() cache.GenericLister +} + +type genericInformer struct { + informer cache.SharedIndexInformer + resource schema.GroupResource +} + +// Informer returns the SharedIndexInformer. +func (f *genericInformer) Informer() cache.SharedIndexInformer { + return f.informer +} + +// Lister returns the GenericLister. +func (f *genericInformer) Lister() cache.GenericLister { + return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource) +} + +// ForResource gives generic access to a shared informer of the matching type +// TODO extend this to unknown resources with a client pool +func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { + switch resource { + // Group=apiextensions.k8s.io, Version=v1 + case v1.SchemeGroupVersion.WithResource("customresourcedefinitions"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Apiextensions().V1().CustomResourceDefinitions().Informer()}, nil + + // Group=apiextensions.k8s.io, Version=v1beta1 + case v1beta1.SchemeGroupVersion.WithResource("customresourcedefinitions"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Apiextensions().V1beta1().CustomResourceDefinitions().Informer()}, nil + + } + + return nil, fmt.Errorf("no informer found for %v", resource) +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go 
b/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go new file mode 100644 index 0000000000..da6eadaa7f --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -0,0 +1,40 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package internalinterfaces + +import ( + time "time" + + clientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + cache "k8s.io/client-go/tools/cache" +) + +// NewInformerFunc takes clientset.Interface and time.Duration to return a SharedIndexInformer. +type NewInformerFunc func(clientset.Interface, time.Duration) cache.SharedIndexInformer + +// SharedInformerFactory a small interface to allow for adding an informer without an import cycle +type SharedInformerFactory interface { + Start(stopCh <-chan struct{}) + InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer +} + +// TweakListOptionsFunc is a function that transforms a v1.ListOptions. 
+type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1beta1/customresourcedefinition.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1beta1/customresourcedefinition.go new file mode 100644 index 0000000000..c57fd40d8f --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1beta1/customresourcedefinition.go @@ -0,0 +1,68 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// CustomResourceDefinitionLister helps list CustomResourceDefinitions. +// All objects returned here must be treated as read-only. +type CustomResourceDefinitionLister interface { + // List lists all CustomResourceDefinitions in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1beta1.CustomResourceDefinition, err error) + // Get retrieves the CustomResourceDefinition from the index for a given name. + // Objects returned here must be treated as read-only. 
+ Get(name string) (*v1beta1.CustomResourceDefinition, error) + CustomResourceDefinitionListerExpansion +} + +// customResourceDefinitionLister implements the CustomResourceDefinitionLister interface. +type customResourceDefinitionLister struct { + indexer cache.Indexer +} + +// NewCustomResourceDefinitionLister returns a new CustomResourceDefinitionLister. +func NewCustomResourceDefinitionLister(indexer cache.Indexer) CustomResourceDefinitionLister { + return &customResourceDefinitionLister{indexer: indexer} +} + +// List lists all CustomResourceDefinitions in the indexer. +func (s *customResourceDefinitionLister) List(selector labels.Selector) (ret []*v1beta1.CustomResourceDefinition, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.CustomResourceDefinition)) + }) + return ret, err +} + +// Get retrieves the CustomResourceDefinition from the index for a given name. +func (s *customResourceDefinitionLister) Get(name string) (*v1beta1.CustomResourceDefinition, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("customresourcedefinition"), name) + } + return obj.(*v1beta1.CustomResourceDefinition), nil +} diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1beta1/expansion_generated.go b/vendor/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1beta1/expansion_generated.go new file mode 100644 index 0000000000..429782deb0 --- /dev/null +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1beta1/expansion_generated.go @@ -0,0 +1,23 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1beta1 + +// CustomResourceDefinitionListerExpansion allows custom methods to be added to +// CustomResourceDefinitionLister. +type CustomResourceDefinitionListerExpansion interface{} diff --git a/vendor/k8s.io/code-generator/generate-groups.sh b/vendor/k8s.io/code-generator/generate-groups.sh old mode 100644 new mode 100755 diff --git a/vendor/k8s.io/code-generator/generate-internal-groups.sh b/vendor/k8s.io/code-generator/generate-internal-groups.sh old mode 100644 new mode 100755 diff --git a/vendor/knative.dev/pkg/client/injection/apiextensions/informers/apiextensions/v1/customresourcedefinition/customresourcedefinition.go b/vendor/knative.dev/pkg/client/injection/apiextensions/informers/apiextensions/v1/customresourcedefinition/customresourcedefinition.go new file mode 100644 index 0000000000..5df3694231 --- /dev/null +++ b/vendor/knative.dev/pkg/client/injection/apiextensions/informers/apiextensions/v1/customresourcedefinition/customresourcedefinition.go @@ -0,0 +1,52 @@ +/* +Copyright 2022 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package customresourcedefinition + +import ( + context "context" + + v1 "k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1" + factory "knative.dev/pkg/client/injection/apiextensions/informers/factory" + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" +) + +func init() { + injection.Default.RegisterInformer(withInformer) +} + +// Key is used for associating the Informer inside the context.Context. +type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Apiextensions().V1().CustomResourceDefinitions() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the typed informer from the context. +func Get(ctx context.Context) v1.CustomResourceDefinitionInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1.CustomResourceDefinitionInformer from context.") + } + return untyped.(v1.CustomResourceDefinitionInformer) +} diff --git a/vendor/knative.dev/pkg/client/injection/apiextensions/informers/factory/factory.go b/vendor/knative.dev/pkg/client/injection/apiextensions/informers/factory/factory.go new file mode 100644 index 0000000000..d3f0cdde2e --- /dev/null +++ b/vendor/knative.dev/pkg/client/injection/apiextensions/informers/factory/factory.go @@ -0,0 +1,56 @@ +/* +Copyright 2022 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package factory + +import ( + context "context" + + externalversions "k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions" + client "knative.dev/pkg/client/injection/apiextensions/client" + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" +) + +func init() { + injection.Default.RegisterInformerFactory(withInformerFactory) +} + +// Key is used as the key for associating information with a context.Context. +type Key struct{} + +func withInformerFactory(ctx context.Context) context.Context { + c := client.Get(ctx) + opts := make([]externalversions.SharedInformerOption, 0, 1) + if injection.HasNamespaceScope(ctx) { + opts = append(opts, externalversions.WithNamespace(injection.GetNamespaceScope(ctx))) + } + return context.WithValue(ctx, Key{}, + externalversions.NewSharedInformerFactoryWithOptions(c, controller.GetResyncPeriod(ctx), opts...)) +} + +// Get extracts the InformerFactory from the context. 
+func Get(ctx context.Context) externalversions.SharedInformerFactory { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions.SharedInformerFactory from context.") + } + return untyped.(externalversions.SharedInformerFactory) +} diff --git a/vendor/knative.dev/pkg/hack/generate-knative.sh b/vendor/knative.dev/pkg/hack/generate-knative.sh old mode 100644 new mode 100755 diff --git a/vendor/knative.dev/pkg/webhook/resourcesemantics/conversion/controller.go b/vendor/knative.dev/pkg/webhook/resourcesemantics/conversion/controller.go new file mode 100644 index 0000000000..8f200d3c23 --- /dev/null +++ b/vendor/knative.dev/pkg/webhook/resourcesemantics/conversion/controller.go @@ -0,0 +1,162 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package conversion + +import ( + "context" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/cache" + "knative.dev/pkg/apis" + apixclient "knative.dev/pkg/client/injection/apiextensions/client" + crdinformer "knative.dev/pkg/client/injection/apiextensions/informers/apiextensions/v1/customresourcedefinition" + "knative.dev/pkg/controller" + secretinformer "knative.dev/pkg/injection/clients/namespacedkube/informers/core/v1/secret" + "knative.dev/pkg/logging" + pkgreconciler "knative.dev/pkg/reconciler" + "knative.dev/pkg/system" + "knative.dev/pkg/webhook" +) + +// ConvertibleObject defines the functionality our API types +// are required to implement in order to be convertible from +// one version to another +// +// Optionally if the object implements apis.Defaultable the +// ConversionController will apply defaults before returning +// the response +type ConvertibleObject interface { + // ConvertTo(ctx, to) + // ConvertFrom(ctx, from) + apis.Convertible + + // DeepCopyObject() + // GetObjectKind() => SetGroupVersionKind(gvk) + runtime.Object +} + +// GroupKindConversion specifies how a specific Kind for a given +// group should be converted +type GroupKindConversion struct { + // DefinitionName specifies the CustomResourceDefinition that should + // be reconciled with by the controller. + // + // The conversion webhook configuration will be updated + // when the CA bundle changes + DefinitionName string + + // HubVersion specifies which version of the CustomResource supports + // conversions to and from all types + // + // It is expected that the Zygotes map contains an entry for the + // specified HubVersion + HubVersion string + + // Zygotes contains a map of version strings (ie. 
v1, v2) to empty + // ConvertibleObject objects + // + // During a conversion request these zygotes will be deep copied + // and manipulated using the apis.Convertible interface + Zygotes map[string]ConvertibleObject +} + +// NewConversionController returns a K8s controller that will +// will reconcile CustomResourceDefinitions and update their +// conversion webhook attributes such as path & CA bundle. +// +// Additionally the controller's Reconciler implements +// webhook.ConversionController for the purposes of converting +// resources between different versions +func NewConversionController( + ctx context.Context, + path string, + kinds map[schema.GroupKind]GroupKindConversion, + withContext func(context.Context) context.Context, +) *controller.Impl { + opts := []OptionFunc{ + WithPath(path), + WithWrapContext(withContext), + WithKinds(kinds), + } + + return newController(ctx, opts...) +} + +func newController(ctx context.Context, optsFunc ...OptionFunc) *controller.Impl { + secretInformer := secretinformer.Get(ctx) + crdInformer := crdinformer.Get(ctx) + client := apixclient.Get(ctx) + woptions := webhook.GetOptions(ctx) + + opts := &options{} + + for _, f := range optsFunc { + f(opts) + } + + r := &reconciler{ + LeaderAwareFuncs: pkgreconciler.LeaderAwareFuncs{ + // Have this reconciler enqueue our types whenever it becomes leader. 
+ PromoteFunc: func(bkt pkgreconciler.Bucket, enq func(pkgreconciler.Bucket, types.NamespacedName)) error { + for _, gkc := range opts.kinds { + name := gkc.DefinitionName + enq(bkt, types.NamespacedName{Name: name}) + } + return nil + }, + }, + + kinds: opts.kinds, + path: opts.path, + secretName: woptions.SecretName, + withContext: opts.wc, + + client: client, + secretLister: secretInformer.Lister(), + crdLister: crdInformer.Lister(), + } + + logger := logging.FromContext(ctx) + controllerOptions := woptions.ControllerOptions + if controllerOptions == nil { + const queueName = "ConversionWebhook" + controllerOptions = &controller.ControllerOptions{WorkQueueName: queueName, Logger: logger.Named(queueName)} + } + c := controller.NewContext(ctx, r, *controllerOptions) + + // Reconciler when the named CRDs change. + for _, gkc := range opts.kinds { + name := gkc.DefinitionName + + crdInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ + FilterFunc: controller.FilterWithName(name), + Handler: controller.HandleAll(c.Enqueue), + }) + + sentinel := c.EnqueueSentinel(types.NamespacedName{Name: name}) + + // Reconcile when the cert bundle changes. + secretInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ + FilterFunc: controller.FilterWithNameAndNamespace(system.Namespace(), woptions.SecretName), + Handler: controller.HandleAll(sentinel), + }) + } + + return c +} diff --git a/vendor/knative.dev/pkg/webhook/resourcesemantics/conversion/conversion.go b/vendor/knative.dev/pkg/webhook/resourcesemantics/conversion/conversion.go new file mode 100644 index 0000000000..29830a8751 --- /dev/null +++ b/vendor/knative.dev/pkg/webhook/resourcesemantics/conversion/conversion.go @@ -0,0 +1,206 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package conversion + +import ( + "context" + "encoding/json" + "fmt" + + "go.uber.org/zap" + + apixv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + "knative.dev/pkg/apis" + "knative.dev/pkg/kmeta" + "knative.dev/pkg/logging" + "knative.dev/pkg/logging/logkey" +) + +// Convert implements webhook.ConversionController +func (r *reconciler) Convert( + ctx context.Context, + req *apixv1.ConversionRequest, +) *apixv1.ConversionResponse { + if r.withContext != nil { + ctx = r.withContext(ctx) + } + + res := &apixv1.ConversionResponse{ + UID: req.UID, + Result: metav1.Status{ + Status: metav1.StatusSuccess, + }, + } + + result := make([]runtime.RawExtension, 0, len(req.Objects)) + + for _, obj := range req.Objects { + converted, err := r.convert(ctx, obj, req.DesiredAPIVersion) + if err != nil { + logging.FromContext(ctx).Errorw("Conversion failed", zap.Error(err)) + res.Result.Status = metav1.StatusFailure + res.Result.Message = err.Error() + break + } + + result = append(result, converted) + } + + res.ConvertedObjects = result + return res +} + +func (r *reconciler) convert( + ctx context.Context, + inRaw runtime.RawExtension, + targetVersion string, +) (runtime.RawExtension, error) { + logger := logging.FromContext(ctx) + var ret runtime.RawExtension + + inGVK, err := parseGVK(inRaw) + if err != nil { + return ret, err + } + + inGK := inGVK.GroupKind() + conv, ok := r.kinds[inGK] + if !ok { + return ret, fmt.Errorf("no 
conversion support for type %s", formatGK(inGVK.GroupKind())) + } + + outGVK, err := parseAPIVersion(targetVersion, inGK.Kind) + if err != nil { + return ret, err + } + + inZygote, ok := conv.Zygotes[inGVK.Version] + if !ok { + return ret, fmt.Errorf("conversion not supported for type %s", formatGVK(inGVK)) + } + outZygote, ok := conv.Zygotes[outGVK.Version] + if !ok { + return ret, fmt.Errorf("conversion not supported for type %s", formatGVK(outGVK)) + } + hubZygote, ok := conv.Zygotes[conv.HubVersion] + if !ok { + return ret, fmt.Errorf("conversion not supported for type %s", formatGK(inGVK.GroupKind())) + } + + in := inZygote.DeepCopyObject().(ConvertibleObject) + hub := hubZygote.DeepCopyObject().(ConvertibleObject) + out := outZygote.DeepCopyObject().(ConvertibleObject) + + hubGVK := inGVK.GroupKind().WithVersion(conv.HubVersion) + + logger = logger.With( + zap.String("inputType", formatGVK(inGVK)), + zap.String("outputType", formatGVK(outGVK)), + zap.String("hubType", formatGVK(hubGVK)), + ) + + // TODO(dprotaso) - potentially error on unknown fields + if err = json.Unmarshal(inRaw.Raw, &in); err != nil { + return ret, fmt.Errorf("unable to unmarshal input: %w", err) + } + + if acc, err := kmeta.DeletionHandlingAccessor(in); err == nil { + // TODO: right now we don't convert any non-namespaced objects. If we ever do that + // this needs to updated to deal with it. 
+ logger = logger.With(zap.String(logkey.Key, acc.GetNamespace()+"/"+acc.GetName())) + } else { + logger.Infof("Could not get Accessor for %s: %v", formatGK(inGVK.GroupKind()), err) + } + ctx = logging.WithLogger(ctx, logger) + + if inGVK.Version == conv.HubVersion { + hub = in + } else if err = hub.ConvertFrom(ctx, in); err != nil { + return ret, fmt.Errorf("conversion failed to version %s for type %s - %w", outGVK.Version, formatGVK(inGVK), err) + } + + if outGVK.Version == conv.HubVersion { + out = hub + } else if err = hub.ConvertTo(ctx, out); err != nil { + return ret, fmt.Errorf("conversion failed to version %s for type %s - %w", outGVK.Version, formatGVK(inGVK), err) + } + + out.GetObjectKind().SetGroupVersionKind(outGVK) + + if defaultable, ok := out.(apis.Defaultable); ok { + defaultable.SetDefaults(ctx) + } + + if ret.Raw, err = json.Marshal(out); err != nil { + return ret, fmt.Errorf("unable to marshal output: %w", err) + } + return ret, nil +} + +func parseGVK(in runtime.RawExtension) (schema.GroupVersionKind, error) { + var ( + typeMeta metav1.TypeMeta + gvk schema.GroupVersionKind + ) + + if err := json.Unmarshal(in.Raw, &typeMeta); err != nil { + return gvk, fmt.Errorf("error parsing type meta %q - %w", string(in.Raw), err) + } + + gv, err := schema.ParseGroupVersion(typeMeta.APIVersion) + if err != nil { + return gvk, fmt.Errorf("error parsing GV %q: %w", typeMeta.APIVersion, err) + } + gvk = gv.WithKind(typeMeta.Kind) + + if gvk.Group == "" || gvk.Version == "" || gvk.Kind == "" { + return gvk, fmt.Errorf("invalid GroupVersionKind %v", gvk) + } + + return gvk, nil +} + +func parseAPIVersion(apiVersion string, kind string) (schema.GroupVersionKind, error) { + gv, err := schema.ParseGroupVersion(apiVersion) + if err != nil { + err = fmt.Errorf("desired API version %q is not valid", apiVersion) + return schema.GroupVersionKind{}, err + } + + if !isValidGV(gv) { + err = fmt.Errorf("desired API version %q is not valid", apiVersion) + return 
schema.GroupVersionKind{}, err + } + + return gv.WithKind(kind), nil +} + +func formatGVK(gvk schema.GroupVersionKind) string { + return fmt.Sprintf("[kind=%s group=%s version=%s]", gvk.Kind, gvk.Group, gvk.Version) +} + +func formatGK(gk schema.GroupKind) string { + return fmt.Sprintf("[kind=%s group=%s]", gk.Kind, gk.Group) +} + +func isValidGV(gk schema.GroupVersion) bool { + return gk.Group != "" && gk.Version != "" +} diff --git a/vendor/knative.dev/pkg/webhook/resourcesemantics/conversion/options.go b/vendor/knative.dev/pkg/webhook/resourcesemantics/conversion/options.go new file mode 100644 index 0000000000..c36df504b7 --- /dev/null +++ b/vendor/knative.dev/pkg/webhook/resourcesemantics/conversion/options.go @@ -0,0 +1,49 @@ +/* +Copyright 2023 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package conversion + +import ( + "context" + + "k8s.io/apimachinery/pkg/runtime/schema" +) + +type options struct { + path string + wc func(context.Context) context.Context + kinds map[schema.GroupKind]GroupKindConversion +} + +type OptionFunc func(*options) + +func WithKinds(kinds map[schema.GroupKind]GroupKindConversion) OptionFunc { + return func(o *options) { + o.kinds = kinds + } +} + +func WithPath(path string) OptionFunc { + return func(o *options) { + o.path = path + } +} + +func WithWrapContext(f func(context.Context) context.Context) OptionFunc { + return func(o *options) { + o.wc = f + } +} diff --git a/vendor/knative.dev/pkg/webhook/resourcesemantics/conversion/reconciler.go b/vendor/knative.dev/pkg/webhook/resourcesemantics/conversion/reconciler.go new file mode 100644 index 0000000000..f23b055872 --- /dev/null +++ b/vendor/knative.dev/pkg/webhook/resourcesemantics/conversion/reconciler.go @@ -0,0 +1,121 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package conversion + +import ( + "context" + "fmt" + + "go.uber.org/zap" + apixv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apixclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" + apixlisters "k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + corelisters "k8s.io/client-go/listers/core/v1" + "knative.dev/pkg/controller" + "knative.dev/pkg/kmp" + "knative.dev/pkg/logging" + "knative.dev/pkg/ptr" + pkgreconciler "knative.dev/pkg/reconciler" + "knative.dev/pkg/system" + "knative.dev/pkg/webhook" + certresources "knative.dev/pkg/webhook/certificates/resources" +) + +type reconciler struct { + pkgreconciler.LeaderAwareFuncs + + kinds map[schema.GroupKind]GroupKindConversion + path string + secretName string + withContext func(context.Context) context.Context + + secretLister corelisters.SecretLister + crdLister apixlisters.CustomResourceDefinitionLister + client apixclient.Interface +} + +var ( + _ webhook.ConversionController = (*reconciler)(nil) + _ controller.Reconciler = (*reconciler)(nil) + _ pkgreconciler.LeaderAware = (*reconciler)(nil) +) + +// Path implements webhook.ConversionController +func (r *reconciler) Path() string { + return r.path +} + +// Reconciler implements controller.Reconciler +func (r *reconciler) Reconcile(ctx context.Context, key string) error { + logger := logging.FromContext(ctx) + + if !r.IsLeaderFor(types.NamespacedName{Name: key}) { + return controller.NewSkipKey(key) + } + + // Look up the webhook secret, and fetch the CA cert bundle. 
+ secret, err := r.secretLister.Secrets(system.Namespace()).Get(r.secretName) + if err != nil { + logger.Errorw("Error fetching secret", zap.Error(err)) + return err + } + + cacert, ok := secret.Data[certresources.CACert] + if !ok { + return fmt.Errorf("secret %q is missing %q key", r.secretName, certresources.CACert) + } + + return r.reconcileCRD(ctx, cacert, key) +} + +func (r *reconciler) reconcileCRD(ctx context.Context, cacert []byte, key string) error { + logger := logging.FromContext(ctx) + + configuredCRD, err := r.crdLister.Get(key) + if err != nil { + return fmt.Errorf("error retrieving crd: %w", err) + } + + crd := configuredCRD.DeepCopy() + + if crd.Spec.Conversion == nil || + crd.Spec.Conversion.Strategy != apixv1.WebhookConverter || + crd.Spec.Conversion.Webhook.ClientConfig == nil || + crd.Spec.Conversion.Webhook.ClientConfig.Service == nil { + return fmt.Errorf("custom resource %q isn't configured for webhook conversion", key) + } + + crd.Spec.Conversion.Webhook.ClientConfig.CABundle = cacert + crd.Spec.Conversion.Webhook.ClientConfig.Service.Path = ptr.String(r.path) + + if ok, err := kmp.SafeEqual(configuredCRD, crd); err != nil { + return fmt.Errorf("error diffing custom resource definitions: %w", err) + } else if !ok { + logger.Infof("updating CRD") + crdClient := r.client.ApiextensionsV1().CustomResourceDefinitions() + if _, err := crdClient.Update(ctx, crd, metav1.UpdateOptions{}); err != nil { + return fmt.Errorf("failed to update webhook: %w", err) + } + } else { + logger.Info("CRD is up to date") + } + + return nil +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 7deb0edf07..6d631e58f2 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -702,7 +702,13 @@ k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextension k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1/fake k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1 
k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1/fake +k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions +k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions +k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1 +k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/apiextensions/v1beta1 +k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions/internalinterfaces k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1 +k8s.io/apiextensions-apiserver/pkg/client/listers/apiextensions/v1beta1 # k8s.io/apimachinery v0.30.3 ## explicit; go 1.22.0 k8s.io/apimachinery/pkg/api/apitesting/fuzzer @@ -1363,6 +1369,8 @@ knative.dev/pkg/apis/testing/fuzzer knative.dev/pkg/changeset knative.dev/pkg/client/injection/apiextensions/client knative.dev/pkg/client/injection/apiextensions/client/fake +knative.dev/pkg/client/injection/apiextensions/informers/apiextensions/v1/customresourcedefinition +knative.dev/pkg/client/injection/apiextensions/informers/factory knative.dev/pkg/client/injection/ducks/duck/v1/addressable knative.dev/pkg/client/injection/ducks/duck/v1/addressable/fake knative.dev/pkg/client/injection/ducks/duck/v1/authstatus @@ -1464,6 +1472,7 @@ knative.dev/pkg/webhook/certificates knative.dev/pkg/webhook/certificates/resources knative.dev/pkg/webhook/json knative.dev/pkg/webhook/resourcesemantics +knative.dev/pkg/webhook/resourcesemantics/conversion knative.dev/pkg/webhook/resourcesemantics/defaulting knative.dev/pkg/webhook/resourcesemantics/validation # knative.dev/reconciler-test v0.0.0-20240926123451-87d857060042