From a1191ffd610bc49857a6478c069c2cbe12c1a004 Mon Sep 17 00:00:00 2001 From: Ahmed Abdalla Date: Fri, 5 Mar 2021 05:58:05 +0100 Subject: [PATCH 01/17] Add Subscription prober Signed-off-by: Ahmed Abdalla --- .../reconciler/controller/controller.go | 78 ++- .../reconciler/controller/kafkachannel.go | 27 +- pkg/channel/consolidated/status/status.go | 469 ++++++++++++++++++ 3 files changed, 550 insertions(+), 24 deletions(-) create mode 100644 pkg/channel/consolidated/status/status.go diff --git a/pkg/channel/consolidated/reconciler/controller/controller.go b/pkg/channel/consolidated/reconciler/controller/controller.go index 309b6f1779..400e952d7a 100644 --- a/pkg/channel/consolidated/reconciler/controller/controller.go +++ b/pkg/channel/consolidated/reconciler/controller/controller.go @@ -18,6 +18,20 @@ package controller import ( "context" + "fmt" + "net/url" + + v12 "knative.dev/eventing/pkg/apis/duck/v1" + + "knative.dev/eventing-kafka/pkg/channel/consolidated/status" + + "knative.dev/eventing/pkg/apis/eventing" + + "k8s.io/apimachinery/pkg/types" + + "k8s.io/apimachinery/pkg/util/sets" + + corev1listers "k8s.io/client-go/listers/core/v1" "github.com/kelseyhightower/envconfig" "go.uber.org/zap" @@ -36,19 +50,64 @@ import ( "knative.dev/pkg/logging" "knative.dev/pkg/system" + "knative.dev/eventing-kafka/pkg/apis/messaging/v1beta1" kafkaChannelClient "knative.dev/eventing-kafka/pkg/client/injection/client" "knative.dev/eventing-kafka/pkg/client/injection/informers/messaging/v1beta1/kafkachannel" kafkaChannelReconciler "knative.dev/eventing-kafka/pkg/client/injection/reconciler/messaging/v1beta1/kafkachannel" eventingClient "knative.dev/eventing/pkg/client/injection/client" ) +type TargetLister struct { +} + +func (t *TargetLister) ListProbeTargets(ctx context.Context, kc v1beta1.KafkaChannel) ([]status.ProbeTarget, error) { + scope, ok := kc.Annotations[eventing.ScopeAnnotationKey] + if !ok { + scope = scopeCluster + } + + dispatcherNamespace := system.Namespace() + if scope == scopeNamespace { + dispatcherNamespace = kc.Namespace + } + + // Get the Dispatcher Service Endpoints and propagate the status to the Channel + // endpoints has the same name as the service, so not a bug. + eps, err := endpoints.Get(ctx).Lister().Endpoints(dispatcherNamespace).Get(dispatcherName) + if err != nil { + return nil, fmt.Errorf("failed to get internal service: %w", err) + } + var readyIPs []string + + for _, sub := range eps.Subsets { + for _, address := range sub.Addresses { + readyIPs = append(readyIPs, address.IP) + } + } + + if len(readyIPs) == 0 { + return nil, fmt.Errorf("no dispatcher pods available") + } + + u, _ := url.Parse(fmt.Sprintf("http://%s.%s/%s/%s", dispatcherName, dispatcherNamespace, kc.Namespace, kc.Name)) + + uls := []*url.URL{u} + + return []status.ProbeTarget{ + { + PodIPs: sets.NewString(readyIPs...), + PodPort: "8080", Port: "8080", URLs: uls, + }, + }, nil } + // NewController initializes the controller and is called by the generated code. // Registers event handlers to enqueue events.
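// Note on ListProbeTargets above: the probe URL addresses the dispatcher through
// its cluster-internal service DNS name and carries the channel coordinates as
// path segments. For illustration only (hypothetical names, not part of this
// patch), a KafkaChannel "my-channel" in namespace "default", with the dispatcher
// running as "kafka-ch-dispatcher" in "knative-eventing", would be probed at:
//
//	u, _ := url.Parse(fmt.Sprintf("http://%s.%s/%s/%s",
//		"kafka-ch-dispatcher", "knative-eventing", "default", "my-channel"))
//	// u.String() == "http://kafka-ch-dispatcher.knative-eventing/default/my-channel"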
func NewController( ctx context.Context, cmw configmap.Watcher, ) *controller.Impl { - + logger := logging.FromContext(ctx) kafkaChannelInformer := kafkachannel.Get(ctx) deploymentInformer := deployment.Get(ctx) endpointsInformer := endpoints.Get(ctx) @@ -70,8 +129,6 @@ func NewController( roleBindingLister: roleBindingInformer.Lister(), } - logger := logging.FromContext(ctx) - env := &envConfig{} if err := envconfig.Process("", env); err != nil { logger.Panicf("unable to process Kafka channel's required environment variables: %v", err) @@ -85,6 +142,16 @@ func NewController( impl := kafkaChannelReconciler.NewImpl(ctx, r) + statusProber := status.NewProber( + logger.Named("status-manager"), + NewProbeTargetLister(logger, endpointsInformer.Lister()), + func(c v1beta1.KafkaChannel, s v12.SubscriberSpec) { + logger.Debugf("Ready callback triggered for channel: %s/%s subscription: %s", c.Namespace, c.Name, string(s.UID)) + impl.EnqueueKey(types.NamespacedName{Namespace: c.Namespace, Name: c.Name}) + }, + ) + r.statusManager = statusProber + statusProber.Start(ctx.Done()) // Get and Watch the Kafka config map and dynamically update Kafka configuration. err := commonconfig.InitializeKafkaConfigMapWatcher(ctx, cmw, logger, r.updateKafkaConfig, system.Namespace()) if err != nil { @@ -127,3 +194,8 @@ func NewController( return impl } + +func NewProbeTargetLister(logger *zap.SugaredLogger, lister corev1listers.EndpointsLister) status.ProbeTargetLister { + tl := TargetLister{} + return &tl +} diff --git a/pkg/channel/consolidated/reconciler/controller/kafkachannel.go b/pkg/channel/consolidated/reconciler/controller/kafkachannel.go index a48c6d1bb2..ec65753165 100644 --- a/pkg/channel/consolidated/reconciler/controller/kafkachannel.go +++ b/pkg/channel/consolidated/reconciler/controller/kafkachannel.go @@ -19,9 +19,10 @@ package controller import ( "context" "fmt" - "strings" "time" + "knative.dev/eventing-kafka/pkg/channel/consolidated/status" + "knative.dev/eventing-kafka/pkg/channel/consolidated/kafka" "knative.dev/eventing-kafka/pkg/common/client" "knative.dev/eventing-kafka/pkg/common/constants" @@ -93,7 +94,7 @@ func newDispatcherServiceWarn(err error) pkgreconciler.Event { } func newServiceAccountWarn(err error) pkgreconciler.Event { - return pkgreconciler.NewEvent(corev1.EventTypeWarning, "DispatcherServiceAccountFailed", "Reconciling dispatcher ServiceAccount failed: %s", err) + return pkgreconciler.NewEvent(corev1.EventTypeWarning, "DispatcherServiceAccountFailed", "Reconciling dispatcher ServiceAccount failed: %s", err) } func newRoleBindingWarn(err error) pkgreconciler.Event { @@ -130,6 +131,7 @@ type Reconciler struct { endpointsLister corev1listers.EndpointsLister serviceAccountLister corev1listers.ServiceAccountLister roleBindingLister rbacv1listers.RoleBindingLister + statusManager *status.Prober } var ( @@ -256,29 +258,12 @@ func (r *Reconciler) ReconcileKind(ctx context.Context, kc *v1beta1.KafkaChannel return newReconciledNormal(kc.Namespace, kc.Name) } -func (r *Reconciler) setupSubscriptionStatusWatcher(ctx context.Context, channel *v1beta1.KafkaChannel) error { - var err error - groupIDPrefix := fmt.Sprintf("kafka.%s.%s", channel.Namespace, channel.Name) - - m := func(cg string) bool { - return strings.HasPrefix(cg, groupIDPrefix) - } - err = r.consumerGroupWatcher.Watch(string(channel.ObjectMeta.UID), func() { - err := r.markSubscriptionReadiness(ctx, channel, r.consumerGroupWatcher.List(m)) - if err != nil { - logging.FromContext(ctx).Errorw("error updating subscription 
readiness", zap.Error(err)) - } - }) - return err -} - -func (r *Reconciler) markSubscriptionReadiness(ctx context.Context, ch *v1beta1.KafkaChannel, cgs []string) error { +func (r *Reconciler) setupSubscriptionStatusWatcher(ctx context.Context, ch *v1beta1.KafkaChannel) error { after := ch.DeepCopy() after.Status.Subscribers = make([]v1.SubscriberStatus, 0) for _, s := range ch.Spec.Subscribers { - cg := fmt.Sprintf("kafka.%s.%s.%s", ch.Namespace, ch.Name, s.UID) - if Find(cgs, cg) { + if r, _ := r.statusManager.IsReady(ctx, *ch, s); r { logging.FromContext(ctx).Debugw("marking subscription", zap.Any("subscription", s)) after.Status.Subscribers = append(after.Status.Subscribers, v1.SubscriberStatus{ UID: s.UID, diff --git a/pkg/channel/consolidated/status/status.go b/pkg/channel/consolidated/status/status.go new file mode 100644 index 0000000000..7aea5ebd4d --- /dev/null +++ b/pkg/channel/consolidated/status/status.go @@ -0,0 +1,469 @@ +/* +Copyright 2019 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package status + +import ( + "context" + "crypto/sha256" + "crypto/tls" + "encoding/json" + "fmt" + "net" + "net/http" + "net/url" + "reflect" + "sync" + "time" + + v12 "knative.dev/eventing/pkg/apis/duck/v1" + + "knative.dev/eventing-kafka/pkg/apis/messaging/v1beta1" + + "go.uber.org/atomic" + "go.uber.org/zap" + "golang.org/x/time/rate" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/util/workqueue" + + "knative.dev/networking/pkg/prober" + "knative.dev/pkg/logging" +) + +const ( + // probeConcurrency defines how many probing calls can be issued simultaneously + probeConcurrency = 15 + // probeTimeout defines the maximum amount of time a request will wait + probeTimeout = 1 * time.Second + // initialDelay defines the delay before enqueuing a probing request the first time. + // It gives times for the change to propagate and prevents unnecessary retries. + initialDelay = 200 * time.Millisecond +) + +var dialContext = (&net.Dialer{Timeout: probeTimeout}).DialContext + +// targetState represents the probing state of a subscription +type targetState struct { + hash string + sub v12.SubscriberSpec + ch v1beta1.KafkaChannel + + // pendingCount is the number of pods that haven't been successfully probed yet + pendingCount atomic.Int32 + lastAccessed time.Time + + cancel func() +} + +// podState represents the probing state of a Pod (for a specific subscription) +type podState struct { + // pendingCount is the number of probes for the Pod + pendingCount atomic.Int32 + + cancel func() +} + +// cancelContext is a pair of a Context and its cancel function +type cancelContext struct { + context context.Context + cancel func() +} + +type workItem struct { + targetStates *targetState + podState *podState + context context.Context + url *url.URL + podIP string + podPort string + logger *zap.SugaredLogger +} + +// ProbeTarget contains the URLs to probes for a set of Pod IPs serving out of the same port. 
+type ProbeTarget struct { + PodIPs sets.String + PodPort string + Port string + URLs []*url.URL +} + +// ProbeTargetLister lists all the targets that require probing. +type ProbeTargetLister interface { + // ListProbeTargets returns a list of targets to be probed + ListProbeTargets(ctx context.Context, ch v1beta1.KafkaChannel) ([]ProbeTarget, error) +} + +// Prober provides a way to check if a Subscription is ready by probing the dispatcher pods +// serving that Subscription's KafkaChannel. +type Prober struct { + logger *zap.SugaredLogger + + // mu guards targetStates and podContexts + mu sync.Mutex + targetStates map[types.UID]*targetState + podContexts map[string]cancelContext + + workQueue workqueue.RateLimitingInterface + + targetLister ProbeTargetLister + + readyCallback func(v1beta1.KafkaChannel, v12.SubscriberSpec) + + probeConcurrency int + + opts []interface{} +} + +// NewProber creates a new instance of Prober +func NewProber( + logger *zap.SugaredLogger, + targetLister ProbeTargetLister, + readyCallback func(v1beta1.KafkaChannel, v12.SubscriberSpec), opts ...interface{}) *Prober { + return &Prober{ + logger: logger, + targetStates: make(map[types.UID]*targetState), + podContexts: make(map[string]cancelContext), + workQueue: workqueue.NewNamedRateLimitingQueue( + workqueue.NewMaxOfRateLimiter( + // Per item exponential backoff + workqueue.NewItemExponentialFailureRateLimiter(50*time.Millisecond, 30*time.Second), + // Global rate limiter + &workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(50), 100)}, + ), + "ProbingQueue"), + targetLister: targetLister, + readyCallback: readyCallback, + probeConcurrency: probeConcurrency, + opts: opts, + } +} + +func computeHash(sub v12.SubscriberSpec) ([sha256.Size]byte, error) { + bytes, err := json.Marshal(sub) + if err != nil { + return [sha256.Size]byte{}, fmt.Errorf("failed to serialize Subscription: %w", err) + } + return sha256.Sum256(bytes), nil +} + +func (m *Prober) IsReady(ctx context.Context, ch v1beta1.KafkaChannel, sub v12.SubscriberSpec) (bool, error) { + subscriptionKey := sub.UID + logger := logging.FromContext(ctx) + + bytes, err := computeHash(sub) + if err != nil { + return false, fmt.Errorf("failed to compute the hash of the Subscription: %w", err) + } + hash := fmt.Sprintf("%x", bytes) + + if ready, ok := func() (bool, bool) { + m.mu.Lock() + defer m.mu.Unlock() + if state, ok := m.targetStates[subscriptionKey]; ok { + if state.hash == hash { + state.lastAccessed = time.Now() + return state.pendingCount.Load() == 0, true + } + + // Cancel the polling for the outdated version + state.cancel() + delete(m.targetStates, subscriptionKey) + } + return false, false + }(); ok { + return ready, nil + } + + subCtx, cancel := context.WithCancel(context.Background()) + subscriptionState := &targetState{ + hash: hash, + sub: sub, + ch: ch, + lastAccessed: time.Now(), + cancel: cancel, + } + + // Get the probe targets and group them by IP + targets, err := m.targetLister.ListProbeTargets(ctx, ch) + if err != nil { + return false, err + } + workItems := make(map[string][]*workItem) + for _, target := range targets { + for ip := range target.PodIPs { + for _, url := range target.URLs { + workItems[ip] = append(workItems[ip], &workItem{ + targetStates: subscriptionState, + url: url, + podIP: ip, + podPort: target.PodPort, + logger: logger, + }) + } + } + } + + subscriptionState.pendingCount.Store(int32(len(workItems))) + + for ip, ipWorkItems := range workItems { + // Get or create the context for that IP + ipCtx := func() context.Context 
{ + m.mu.Lock() + defer m.mu.Unlock() + cancelCtx, ok := m.podContexts[ip] + if !ok { + ctx, cancel := context.WithCancel(context.Background()) + cancelCtx = cancelContext{ + context: ctx, + cancel: cancel, + } + m.podContexts[ip] = cancelCtx + } + return cancelCtx.context + }() + + podCtx, cancel := context.WithCancel(subCtx) + podState := &podState{ + pendingCount: *atomic.NewInt32(int32(len(ipWorkItems))), + cancel: cancel, + } + + // Quick and dirty way to join two contexts (i.e. podCtx is cancelled when either subCtx or ipCtx is cancelled) + go func() { + select { + case <-podCtx.Done(): + // This is the actual context, there is nothing to do except + // break to avoid leaking this goroutine. + break + case <-ipCtx.Done(): + // Cancel podCtx + cancel() + } + }() + + // Update the states when probing is cancelled + go func() { + <-podCtx.Done() + m.onProbingCancellation(subscriptionState, podState) + }() + + for _, wi := range ipWorkItems { + wi.podState = podState + wi.context = podCtx + m.workQueue.AddAfter(wi, initialDelay) + logger.Infof("Queuing probe for %s, IP: %s:%s (depth: %d)", + wi.url, wi.podIP, wi.podPort, m.workQueue.Len()) + } + } + + func() { + m.mu.Lock() + defer m.mu.Unlock() + m.targetStates[subscriptionKey] = subscriptionState + }() + return len(workItems) == 0, nil +} + +// Start starts the Prober background operations +func (m *Prober) Start(done <-chan struct{}) chan struct{} { + var wg sync.WaitGroup + + // Start the worker goroutines + for i := 0; i < m.probeConcurrency; i++ { + wg.Add(1) + go func() { + defer wg.Done() + for m.processWorkItem() { + } + }() + } + + // Stop processing the queue when cancelled + go func() { + <-done + m.workQueue.ShutDown() + }() + + // Return a channel closed when all work is done + ch := make(chan struct{}) + go func() { + wg.Wait() + close(ch) + }() + return ch +} + +// CancelProbing cancels probing of the provided Subscription +func (m *Prober) CancelProbing(sub v12.SubscriberSpec) { + key := sub.UID + m.mu.Lock() + defer m.mu.Unlock() + if state, ok := m.targetStates[key]; ok { + state.cancel() + delete(m.targetStates, key) + } +} + +// CancelPodProbing cancels probing of the provided Pod IP. + +// +// TODO(#6269): make this cancellation based on Pod x port instead of just Pod. +func (m *Prober) CancelPodProbing(obj interface{}) { + if pod, ok := obj.(*corev1.Pod); ok { + m.mu.Lock() + defer m.mu.Unlock() + + if ctx, ok := m.podContexts[pod.Status.PodIP]; ok { + ctx.cancel() + delete(m.podContexts, pod.Status.PodIP) + } + } +} + +// processWorkItem processes a single work item from workQueue. +// It returns false when there are no more items to process, true otherwise. +func (m *Prober) processWorkItem() bool { + obj, shutdown := m.workQueue.Get() + if shutdown { + return false + } + + defer m.workQueue.Done(obj) + + // Crash if the item is not of the expected type + item, ok := obj.(*workItem) + if !ok { + m.logger.Fatalf("Unexpected work item type: want: %s, got: %s\n", + reflect.TypeOf(&workItem{}).Name(), reflect.TypeOf(obj).Name()) + } + item.logger.Infof("Processing probe for %s, IP: %s:%s (depth: %d)", + item.url, item.podIP, item.podPort, m.workQueue.Len()) + + transport := http.DefaultTransport.(*http.Transport).Clone() + transport.TLSClientConfig = &tls.Config{ + //nolint:gosec + // We only want to know that the dispatcher is configured, not that the configuration is valid. + // Therefore, we can safely ignore any TLS certificate validation.
+ InsecureSkipVerify: true, + } + transport.DialContext = func(ctx context.Context, network, addr string) (conn net.Conn, e error) { + // Requests with the IP as hostname and the Host header set do not pass client-side validation + // because the HTTP client validates that the hostname (not the Host header) matches the server + // TLS certificate Common Name or Alternative Names. Therefore, http.Request.URL is set to the + // hostname and it is substituted here with the target IP. + return dialContext(ctx, network, net.JoinHostPort(item.podIP, item.podPort)) + } + + probeURL := deepCopy(item.url) + + ctx, cancel := context.WithTimeout(item.context, probeTimeout) + defer cancel() + var opts []interface{} + opts = append(opts, m.opts...) + opts = append(opts, m.probeVerifier(item)) + + ok, err := prober.Do( + ctx, + transport, + probeURL.String(), + opts...) + + // In case of cancellation, drop the work item + select { + case <-item.context.Done(): + m.workQueue.Forget(obj) + return true + default: + } + + if err != nil || !ok { + // In case of error, enqueue for retry + m.workQueue.AddRateLimited(obj) + item.logger.Errorf("Probing of %s failed, IP: %s:%s, ready: %t, error: %v (depth: %d)", + item.url, item.podIP, item.podPort, ok, err, m.workQueue.Len()) + } else { + m.onProbingSuccess(item.targetStates, item.podState) + } + return true +} + +func (m *Prober) onProbingSuccess(subscriptionState *targetState, podState *podState) { + // The last probe call for the Pod succeeded, the Pod is ready + if podState.pendingCount.Dec() == 0 { + // Unlock the goroutine blocked on <-podCtx.Done() + podState.cancel() + + // This is the last pod being successfully probed, the subscription is ready + if subscriptionState.pendingCount.Dec() == 0 { + m.readyCallback(subscriptionState.ch, subscriptionState.sub) + } + } +} + +func (m *Prober) onProbingCancellation(subscriptionState *targetState, podState *podState) { + for { + pendingCount := podState.pendingCount.Load() + if pendingCount <= 0 { + // Probing succeeded, nothing to do + return + } + + // Attempt to set pendingCount to 0. + if podState.pendingCount.CAS(pendingCount, 0) { + // This is the last pod being successfully probed, the subscription is ready + if subscriptionState.pendingCount.Dec() == 0 { + m.readyCallback(subscriptionState.ch, subscriptionState.sub) + } + return + } + } +} + +func (m *Prober) probeVerifier(item *workItem) prober.Verifier { + return func(r *http.Response, _ []byte) (bool, error) { + //TODO Check if we need to use a hash + switch r.StatusCode { + case http.StatusOK: + /** + {"my-kafka-channel":["90713ffd-f527-42bf-b158-57630b68ebe2","a2041ec2-3295-4cd8-ac31-e699ab08273e","d3d70a79-8528-4df6-a812-3b559380cf08","db536b74-45f8-41cd-ab3e-7e3f60ed9e35","eb3aeee9-7cb5-4cad-b4c4-424e436dac9f"]} + */ + var subscriptions = make(map[string][]string) + json.NewDecoder(r.Body).Decode(subscriptions) + if subs, ok := subscriptions[item.targetStates.ch.Name]; ok && sets.NewString(subs...).Has(string(item.targetStates.sub.UID)) { + return true, nil + } else { + //TODO return an error if the channel doesn't exist?
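+			// Worked example with the payload documented above (UIDs taken from that
+			// sample): a work item for subscription 90713ffd-f527-42bf-b158-57630b68ebe2
+			// on channel "my-kafka-channel" reduces to the following check, which
+			// succeeds; any other channel name or subscriber UID falls through to
+			// this branch instead:
+			//
+			//	subs := subscriptions["my-kafka-channel"]
+			//	ready := sets.NewString(subs...).Has("90713ffd-f527-42bf-b158-57630b68ebe2") // true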
+ return false, nil + } + case http.StatusNotFound, http.StatusServiceUnavailable: + return false, fmt.Errorf("unexpected status code: want %v, got %v", http.StatusOK, r.StatusCode) + default: + item.logger.Errorf("Probing of %s abandoned, IP: %s:%s: the response status is %v, expected one of: %v", + item.url, item.podIP, item.podPort, r.StatusCode, + []int{http.StatusOK, http.StatusNotFound, http.StatusServiceUnavailable}) + return true, nil + } + } +} + +// deepCopy copies a URL into a new one +func deepCopy(in *url.URL) *url.URL { + // Safe to ignore the error since this is a deep copy + newURL, _ := url.Parse(in.String()) + return newURL +} From 1d7c24cceb3667b2b88d8920c104b7b985c0325c Mon Sep 17 00:00:00 2001 From: Ahmed Abdalla Date: Mon, 8 Mar 2021 08:19:45 +0100 Subject: [PATCH 02/17] Fix endpoints informer in cons. KafkaChannel controller --- go.mod | 2 + go.sum | 10 +- pkg/channel/consolidated/kafka/admin.go | 109 ---------- pkg/channel/consolidated/kafka/admin_test.go | 93 -------- .../controller/consumer_group_watcher.go | 201 ------------------ .../controller/consumer_group_watcher_test.go | 84 -------- .../reconciler/controller/controller.go | 14 +- .../reconciler/controller/kafkachannel.go | 23 +- pkg/channel/consolidated/status/status.go | 17 +- pkg/common/consumer/consumer_handler.go | 3 +- .../knative.dev/networking/pkg/prober/LICENSE | 201 ++++++++++++++++++ .../gogo/protobuf/proto/text_parser.go | 2 +- .../golang.org/x/tools/cmd/goimports/doc.go | 4 + .../golang.org/x/tools/go/ast/astutil/util.go | 4 + vendor/golang.org/x/tools/imports/forward.go | 4 + .../x/tools/internal/gocommand/version.go | 13 +- .../x/tools/internal/imports/mod.go | 4 + .../x/tools/internal/imports/mod_cache.go | 4 + vendor/knative.dev/networking/LICENSE | 201 ++++++++++++++++++ .../knative.dev/networking/pkg/prober/doc.go | 18 ++ .../networking/pkg/prober/prober.go | 199 +++++++++++++++++ vendor/modules.txt | 8 +- 22 files changed, 697 insertions(+), 521 deletions(-) delete mode 100644 pkg/channel/consolidated/kafka/admin.go delete mode 100644 pkg/channel/consolidated/kafka/admin_test.go delete mode 100644 pkg/channel/consolidated/reconciler/controller/consumer_group_watcher.go delete mode 100644 pkg/channel/consolidated/reconciler/controller/consumer_group_watcher_test.go create mode 100644 third_party/VENDOR-LICENSE/knative.dev/networking/pkg/prober/LICENSE create mode 100644 vendor/knative.dev/networking/LICENSE create mode 100644 vendor/knative.dev/networking/pkg/prober/doc.go create mode 100644 vendor/knative.dev/networking/pkg/prober/prober.go diff --git a/go.mod b/go.mod index 65d9ec4c76..d0c7ad1bfa 100644 --- a/go.mod +++ b/go.mod @@ -25,6 +25,7 @@ require ( github.com/stretchr/testify v1.6.1 github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c go.opencensus.io v0.22.6 + go.uber.org/atomic v1.7.0 go.uber.org/zap v1.16.0 golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897 // indirect golang.org/x/sync v0.0.0-20201207232520-09787c993a3a @@ -35,5 +36,6 @@ require ( k8s.io/utils v0.0.0-20200729134348-d5654de09c73 knative.dev/eventing v0.21.1-0.20210309092525-37e702765dbc knative.dev/hack v0.0.0-20210305150220-f99a25560134 + knative.dev/networking v0.0.0-20210304153916-f813b5904943 knative.dev/pkg v0.0.0-20210309024624-0f8d8de5949d ) diff --git a/go.sum b/go.sum index fe12d25c9b..5494498f96 100644 --- a/go.sum +++ b/go.sum @@ -330,6 +330,8 @@ github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a github.com/gogo/protobuf v1.2.1/go.mod 
h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -510,6 +512,7 @@ github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dv github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.9.7/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.11.0 h1:wJbzvpYMVGG9iTI9VxpnNZfd4DzMPoCWze3GgSqz8yg= @@ -687,6 +690,7 @@ github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6So github.com/rogpeppe/fastuuid v1.2.0 h1:Ppwyp6VYCF1nvBTXL3trRso7mXMlRrw9ooo375wvi2s= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rs/dnscache v0.0.0-20210201191234-295bba877686/go.mod h1:qe5TWALJ8/a1Lqznoc5BDHpYX/8HU60Hm2AwRmqzxqA= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= @@ -1035,6 +1039,7 @@ golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= @@ -1043,6 +1048,8 @@ golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818 h1:u2ssHESKr0HP2d1wlnjMKH+V/22Vg1lGCVuXmOYU1qA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod 
h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a h1:CB3a9Nez8M13wwlr/E2YtwoU+qYHKfC+JrDa45RXXoQ= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1244,7 +1251,8 @@ knative.dev/hack v0.0.0-20210203173706-8368e1f6eacf h1:u4cY4jr2LYvhoz/1HBWEPsMiL knative.dev/hack v0.0.0-20210203173706-8368e1f6eacf/go.mod h1:PHt8x8yX5Z9pPquBEfIj0X66f8iWkWfR0S/sarACJrI= knative.dev/hack v0.0.0-20210305150220-f99a25560134 h1:lUllAp28TkevQIgWrsjow8ZLnXJy3AraRzGFm/ffD2c= knative.dev/hack v0.0.0-20210305150220-f99a25560134/go.mod h1:PHt8x8yX5Z9pPquBEfIj0X66f8iWkWfR0S/sarACJrI= -knative.dev/pkg v0.0.0-20210303192215-8fbab7ebb77b h1:AynUh7MBUe44E60vh0vIyF2Bes4AOoTT2ejy9xrF0FU= +knative.dev/networking v0.0.0-20210304153916-f813b5904943 h1:EEAnCZzqVoTNNPMYyONXqOD3e/45OPVahA4jm8ET4/g= +knative.dev/networking v0.0.0-20210304153916-f813b5904943/go.mod h1:G+KCelFuLocMrnfayHoxqsFG+IYX4t8To1celZes77k= knative.dev/pkg v0.0.0-20210303192215-8fbab7ebb77b/go.mod h1:TJSdebQOWX5N2bszohOYVi0H1QtXbtlYLuMghAFBMhY= knative.dev/pkg v0.0.0-20210308052421-737401c38b22/go.mod h1:fP690UCcs5x+qQVhjJxNcm97OWIiUdFC1dqbD3Gsp64= knative.dev/pkg v0.0.0-20210309024624-0f8d8de5949d h1:2Uc3qyLRLIYOqJrGGKFkJc69X+cxlhoH3jk7p4b4KFM= diff --git a/pkg/channel/consolidated/kafka/admin.go b/pkg/channel/consolidated/kafka/admin.go deleted file mode 100644 index 9bfb1ea140..0000000000 --- a/pkg/channel/consolidated/kafka/admin.go +++ /dev/null @@ -1,109 +0,0 @@ -/* -Copyright 2020 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package kafka - -import ( - "context" - "fmt" - "math" - "sync" - "time" - - "k8s.io/apimachinery/pkg/util/sets" - - "go.uber.org/zap" - - "github.com/Shopify/sarama" - "knative.dev/pkg/logging" -) - -var mutex sync.Mutex - -type ClusterAdminFactory func() (sarama.ClusterAdmin, error) - -type AdminClient interface { - // ListConsumerGroups Lists the consumer groups - ListConsumerGroups() ([]string, error) -} - -// AdminClientManager manages a ClusterAdmin connection and recreates one when needed -// it is made to overcome https://github.com/Shopify/sarama/issues/1162 -type AdminClientManager struct { - logger *zap.SugaredLogger - adminFactory ClusterAdminFactory - clusterAdmin sarama.ClusterAdmin -} - -func NewAdminClient(ctx context.Context, caFactory ClusterAdminFactory) (AdminClient, error) { - logger := logging.FromContext(ctx) - logger.Debug("Creating a new AdminClient") - kafkaClusterAdmin, err := caFactory() - if err != nil { - logger.Errorw("error while creating ClusterAdmin", zap.Error(err)) - return nil, err - } - return &AdminClientManager{ - logger: logger, - adminFactory: caFactory, - clusterAdmin: kafkaClusterAdmin, - }, nil -} - -// ListConsumerGroups Returns a list of the consumer groups. -// -// In the occasion of errors, there will be a retry with an exponential backoff. -// Due to a known issue in Sarama ClusterAdmin https://github.com/Shopify/sarama/issues/1162, -// a new ClusterAdmin will be created with every retry until the call succeeds or -// the timeout is reached. -func (c *AdminClientManager) ListConsumerGroups() ([]string, error) { - c.logger.Debug("Attempting to list consumer group") - mutex.Lock() - defer mutex.Unlock() - r := 0 - // This gives us around ~13min of exponential backoff - max := 13 - cgsMap, err := c.clusterAdmin.ListConsumerGroups() - for err != nil && r <= max { - // There's on error, let's retry and presume a new ClusterAdmin can fix it - - // Calculate incremental delay following this https://docs.aws.amazon.com/general/latest/gr/api-retries.html - t := int(math.Pow(2, float64(r)) * 100) - d := time.Duration(t) * time.Millisecond - c.logger.Errorw("listing consumer group failed. Refreshing the ClusterAdmin and retrying.", - zap.Error(err), - zap.Duration("retry after", d), - zap.Int("Retry attempt", r), - zap.Int("Max retries", max), - ) - time.Sleep(d) - - // let's reconnect and try again - c.clusterAdmin, err = c.adminFactory() - r += 1 - if err != nil { - // skip this attempt - continue - } - cgsMap, err = c.clusterAdmin.ListConsumerGroups() - } - - if r > max { - return nil, fmt.Errorf("failed to refresh the culster admin and retry: %v", err) - } - - return sets.StringKeySet(cgsMap).List(), nil -} diff --git a/pkg/channel/consolidated/kafka/admin_test.go b/pkg/channel/consolidated/kafka/admin_test.go deleted file mode 100644 index 3e671c22a2..0000000000 --- a/pkg/channel/consolidated/kafka/admin_test.go +++ /dev/null @@ -1,93 +0,0 @@ -/* -Copyright 2020 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package kafka - -import ( - "fmt" - "sync" - "testing" - "time" - - "github.com/Shopify/sarama" - pkgtesting "knative.dev/pkg/logging/testing" -) - -const testCG = "cg1" - -var m sync.RWMutex - -type FakeClusterAdmin struct { - sarama.ClusterAdmin - faulty bool -} - -func (f *FakeClusterAdmin) ListConsumerGroups() (map[string]string, error) { - cgs := map[string]string{ - testCG: "cg", - } - m.RLock() - defer m.RUnlock() - if f.faulty { - return nil, fmt.Errorf("Error") - } - return cgs, nil -} - -func TestAdminClient(t *testing.T) { - var wg sync.WaitGroup - wg.Add(10) - ctx := pkgtesting.TestContextWithLogger(t) - f := &FakeClusterAdmin{} - ac, err := NewAdminClient(ctx, func() (sarama.ClusterAdmin, error) { - return f, nil - }) - if err != nil { - t.Error("failed to obtain new client", err) - } - for i := 0; i < 10; i += 1 { - go func() { - doList(t, ac) - check := make(chan struct{}) - go func() { - m.Lock() - f.faulty = true - m.Unlock() - check <- struct{}{} - time.Sleep(2 * time.Second) - m.Lock() - f.faulty = false - m.Unlock() - check <- struct{}{} - }() - <-check - doList(t, ac) - <-check - wg.Done() - }() - } - wg.Wait() -} - -func doList(t *testing.T, ac AdminClient) { - cgs, _ := ac.ListConsumerGroups() - if len(cgs) != 1 { - t.Fatalf("list consumer group: got %d, want %d", len(cgs), 1) - } - if cgs[0] != testCG { - t.Fatalf("consumer group: got %s, want %s", cgs[0], testCG) - } -} diff --git a/pkg/channel/consolidated/reconciler/controller/consumer_group_watcher.go b/pkg/channel/consolidated/reconciler/controller/consumer_group_watcher.go deleted file mode 100644 index 353811c264..0000000000 --- a/pkg/channel/consolidated/reconciler/controller/consumer_group_watcher.go +++ /dev/null @@ -1,201 +0,0 @@ -/* -Copyright 2020 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - "context" - "sync" - "time" - - "k8s.io/apimachinery/pkg/util/sets" - - "go.uber.org/zap" - "knative.dev/pkg/logging" - - "knative.dev/eventing-kafka/pkg/channel/consolidated/kafka" -) - -var ( - watchersMtx sync.RWMutex - cacheMtx sync.RWMutex - // Hooks into the poll logic for testing - after = time.After - done = func() {} -) - -type ConsumerGroupHandler func() -type Matcher func(string) bool - -type ConsumerGroupWatcher interface { - // Start instructs the watcher to start polling for the consumer groups and - // notify any observers on the event of any changes - Start() error - - // Terminate instructs the watcher to stop polling and clear the watchers cache - Terminate() - - // Watch registers callback on the event of any changes observed - // on the consumer groups. watcherID is an arbitrary string the user provides - // that will be used to identify his callbacks when provided to Forget(watcherID). - // - // To ensure this is event-triggered, level-driven, - // we don't pass the updates to the callback, instead the observer is expected - // to use List() to get the updated list of ConsumerGroups. 
- Watch(watcherID string, callback ConsumerGroupHandler) error - - // Forget removes all callbacks that correspond to the watcherID - Forget(watcherID string) - - // List returns all the cached consumer groups that match matcher. - // It will return an empty slice if none matched or the cache is empty - List(matcher Matcher) []string -} - -type WatcherImpl struct { - logger *zap.SugaredLogger - //TODO name? - watchers map[string]ConsumerGroupHandler - cachedConsumerGroups sets.String - adminClient kafka.AdminClient - pollDuration time.Duration - done chan struct{} -} - -func NewConsumerGroupWatcher(ctx context.Context, ac kafka.AdminClient, pollDuration time.Duration) ConsumerGroupWatcher { - return &WatcherImpl{ - logger: logging.FromContext(ctx), - adminClient: ac, - pollDuration: pollDuration, - watchers: make(map[string]ConsumerGroupHandler), - cachedConsumerGroups: sets.String{}, - } -} - -func (w *WatcherImpl) Start() error { - w.logger.Infow("ConsumerGroupWatcher starting. Polling for consumer groups", zap.Duration("poll duration", w.pollDuration)) - go func() { - for { - select { - case <-after(w.pollDuration): - // let's get current observed consumer groups - observedCGs, err := w.adminClient.ListConsumerGroups() - if err != nil { - w.logger.Errorw("error while listing consumer groups", zap.Error(err)) - continue - } - var notify bool - var changedGroup string - observedCGsSet := sets.String{}.Insert(observedCGs...) - // Look for observed CGs - for c := range observedCGsSet { - if !w.cachedConsumerGroups.Has(c) { - // This is the first appearance. - w.logger.Debugw("Consumer group observed. Caching.", - zap.String("consumer group", c)) - changedGroup = c - notify = true - break - } - } - // Look for disappeared CGs - for c := range w.cachedConsumerGroups { - if !observedCGsSet.Has(c) { - // This CG was cached but it's no longer there. 
- w.logger.Debugw("Consumer group deleted.", - zap.String("consumer group", c)) - changedGroup = c - notify = true - break - } - } - if notify { - cacheMtx.Lock() - w.cachedConsumerGroups = observedCGsSet - cacheMtx.Unlock() - w.notify(changedGroup) - } - done() - case <-w.done: - break - } - } - }() - return nil -} - -func (w *WatcherImpl) Terminate() { - watchersMtx.Lock() - cacheMtx.Lock() - defer watchersMtx.Unlock() - defer cacheMtx.Unlock() - - w.watchers = nil - w.cachedConsumerGroups = nil - if w.done != nil { - w.done <- struct{}{} - } -} - -// TODO explore returning a channel instead of a taking callback -func (w *WatcherImpl) Watch(watcherID string, cb ConsumerGroupHandler) error { - w.logger.Debugw("Adding a new watcher", zap.String("watcherID", watcherID)) - watchersMtx.Lock() - defer watchersMtx.Unlock() - w.watchers[watcherID] = cb - - // notify at least once to get the current state - cb() - return nil -} - -func (w *WatcherImpl) Forget(watcherID string) { - w.logger.Debugw("Forgetting watcher", zap.String("watcherID", watcherID)) - watchersMtx.Lock() - defer watchersMtx.Unlock() - delete(w.watchers, watcherID) -} - -func (w *WatcherImpl) List(matcher Matcher) []string { - w.logger.Debug("Listing consumer groups") - cacheMtx.RLock() - defer cacheMtx.RUnlock() - cgs := make([]string, 0) - for cg := range w.cachedConsumerGroups { - if matcher(cg) { - cgs = append(cgs, cg) - } - } - return cgs -} - -func (w *WatcherImpl) notify(cg string) { - watchersMtx.RLock() - defer watchersMtx.RUnlock() - - for _, cb := range w.watchers { - cb() - } -} - -func Find(list []string, item string) bool { - for _, i := range list { - if i == item { - return true - } - } - return false -} diff --git a/pkg/channel/consolidated/reconciler/controller/consumer_group_watcher_test.go b/pkg/channel/consolidated/reconciler/controller/consumer_group_watcher_test.go deleted file mode 100644 index e543c01ced..0000000000 --- a/pkg/channel/consolidated/reconciler/controller/consumer_group_watcher_test.go +++ /dev/null @@ -1,84 +0,0 @@ -/* -Copyright 2020 The Knative Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package controller - -import ( - "sync" - "testing" - "time" - - "k8s.io/apimachinery/pkg/util/sets" - - pkgtesting "knative.dev/pkg/logging/testing" -) - -//TODO how to mock the sarama AdminClient -type FakeClusterAdmin struct { - mutex sync.RWMutex - cgs sets.String -} - -func (fake *FakeClusterAdmin) ListConsumerGroups() ([]string, error) { - fake.mutex.RLock() - defer fake.mutex.RUnlock() - return fake.cgs.List(), nil -} - -func (fake *FakeClusterAdmin) deleteCG(cg string) { - fake.mutex.Lock() - defer fake.mutex.Unlock() - fake.cgs.Delete(cg) -} - -func TestKafkaWatcher(t *testing.T) { - cgname := "kafka.event-example.default-kne-trigger.0d9c4383-1e68-42b5-8c3a-3788274404c5" - wid := "channel-abc" - cgs := sets.String{} - cgs.Insert(cgname) - ca := FakeClusterAdmin{ - cgs: cgs, - } - - ch := make(chan sets.String, 1) - - w := NewConsumerGroupWatcher(pkgtesting.TestContextWithLogger(t), &ca, 2*time.Second) - w.Watch(wid, func() { - cgs := w.List(func(cg string) bool { - return cgname == cg - }) - result := sets.String{} - result.Insert(cgs...) - ch <- result - }) - - w.Start() - <-ch - assertSync(t, ch, cgs) - ca.deleteCG(cgname) - assertSync(t, ch, sets.String{}) -} - -func assertSync(t *testing.T, ch chan sets.String, cgs sets.String) { - select { - case syncedCGs := <-ch: - if !syncedCGs.Equal(cgs) { - t.Errorf("observed and expected consumer groups do not match. got %v expected %v", syncedCGs, cgs) - } - case <-time.After(6 * time.Second): - t.Errorf("timedout waiting for consumer groups to sync") - } -} diff --git a/pkg/channel/consolidated/reconciler/controller/controller.go b/pkg/channel/consolidated/reconciler/controller/controller.go index 400e952d7a..1b0b9ff340 100644 --- a/pkg/channel/consolidated/reconciler/controller/controller.go +++ b/pkg/channel/consolidated/reconciler/controller/controller.go @@ -40,7 +40,8 @@ import ( "k8s.io/client-go/tools/cache" kubeclient "knative.dev/pkg/client/injection/kube/client" "knative.dev/pkg/client/injection/kube/informers/apps/v1/deployment" - "knative.dev/pkg/client/injection/kube/informers/core/v1/endpoints" + endpointsinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/endpoints" + "knative.dev/pkg/client/injection/kube/informers/core/v1/service" "knative.dev/pkg/client/injection/kube/informers/core/v1/serviceaccount" "knative.dev/pkg/client/injection/kube/informers/rbac/v1/rolebinding" @@ -58,6 +59,7 @@ import ( ) type TargetLister struct { + endpointLister corev1listers.EndpointsLister } func (t *TargetLister) ListProbeTargets(ctx context.Context, kc v1beta1.KafkaChannel) ([]status.ProbeTarget, error) { @@ -73,7 +75,7 @@ func (t *TargetLister) ListProbeTargets(ctx context.Context, kc v1beta1.KafkaCha // Get the Dispatcher Service Endpoints and propagate the status to the Channel // endpoints has the same name as the service, so not a bug. 
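// With the endpoints lister injected through the TargetLister struct (see the
// change just below), ListProbeTargets no longer reaches into the injection
// context, which also makes it straightforward to unit test against a fake
// client. A rough sketch under assumed test scaffolding (the fake objects and
// logger here are hypothetical):
//
//	client := fake.NewSimpleClientset(dispatcherEndpoints)
//	factory := informers.NewSharedInformerFactory(client, 0)
//	tl := NewProbeTargetLister(logger, factory.Core().V1().Endpoints().Lister())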
- eps, err := endpoints.Get(ctx).Lister().Endpoints(dispatcherNamespace).Get(dispatcherName) + eps, err := t.endpointLister.Endpoints(dispatcherNamespace).Get(dispatcherName) if err != nil { return nil, fmt.Errorf("failed to get internal service: %w", err) } @@ -96,7 +98,7 @@ func (t *TargetLister) ListProbeTargets(ctx context.Context, kc v1beta1.KafkaCha return []status.ProbeTarget{ { PodIPs: sets.NewString(readyIPs...), - PodPort: "8080", Port: "8080", URLs: uls, + PodPort: "8081", Port: "8081", URLs: uls, }, }, nil } @@ -110,7 +112,7 @@ func NewController( logger := logging.FromContext(ctx) kafkaChannelInformer := kafkachannel.Get(ctx) deploymentInformer := deployment.Get(ctx) - endpointsInformer := endpoints.Get(ctx) + endpointsInformer := endpointsinformer.Get(ctx) serviceAccountInformer := serviceaccount.Get(ctx) roleBindingInformer := rolebinding.Get(ctx) serviceInformer := service.Get(ctx) @@ -196,6 +198,8 @@ func NewController( } func NewProbeTargetLister(logger *zap.SugaredLogger, lister corev1listers.EndpointsLister) status.ProbeTargetLister { - tl := TargetLister{} + tl := TargetLister{ + endpointLister: lister, + } return &tl } diff --git a/pkg/channel/consolidated/reconciler/controller/kafkachannel.go b/pkg/channel/consolidated/reconciler/controller/kafkachannel.go index ec65753165..197b83631b 100644 --- a/pkg/channel/consolidated/reconciler/controller/kafkachannel.go +++ b/pkg/channel/consolidated/reconciler/controller/kafkachannel.go @@ -23,7 +23,6 @@ import ( "knative.dev/eventing-kafka/pkg/channel/consolidated/status" - "knative.dev/eventing-kafka/pkg/channel/consolidated/kafka" "knative.dev/eventing-kafka/pkg/common/client" "knative.dev/eventing-kafka/pkg/common/constants" @@ -123,7 +122,6 @@ type Reconciler struct { // Using a shared kafkaClusterAdmin does not work currently because of an issue with // Shopify/sarama, see https://github.com/Shopify/sarama/issues/1162. kafkaClusterAdmin sarama.ClusterAdmin - consumerGroupWatcher ConsumerGroupWatcher kafkachannelLister listers.KafkaChannelLister kafkachannelInformer cache.SharedIndexInformer deploymentLister appsv1listers.DeploymentLister @@ -149,7 +147,6 @@ var _ kafkaChannelReconciler.Finalizer = (*Reconciler)(nil) func (r *Reconciler) ReconcileKind(ctx context.Context, kc *v1beta1.KafkaChannel) pkgreconciler.Event { kc.Status.InitializeConditions() - logger := logging.FromContext(ctx) // Verify channel is valid. 
kc.SetDefaults(ctx) @@ -575,24 +572,11 @@ func (r *Reconciler) updateKafkaConfig(ctx context.Context, configMap *corev1.Co // Eventually the previous config should be snapshotted to delete Kafka topics r.kafkaConfig = kafkaConfig r.kafkaConfigError = err - ac, err := kafka.NewAdminClient(ctx, func() (sarama.ClusterAdmin, error) { - return client.MakeAdminClient(ctx, controllerAgentName, r.kafkaAuthConfig, kafkaConfig.SaramaSettingsYamlString, kafkaConfig.Brokers) - }) if err != nil { logger.Errorw("Error creating AdminClient", zap.Error(err)) return } - - if r.consumerGroupWatcher != nil { - logger.Info("terminating consumer group watcher") - r.consumerGroupWatcher.Terminate() - logger.Info("terminated consumer group watcher") - } - - r.consumerGroupWatcher = NewConsumerGroupWatcher(ctx, ac, pollInterval) - //TODO handle error - r.consumerGroupWatcher.Start() } func (r *Reconciler) FinalizeKind(ctx context.Context, kc *v1beta1.KafkaChannel) pkgreconciler.Event { @@ -603,8 +587,9 @@ func (r *Reconciler) FinalizeKind(ctx context.Context, kc *v1beta1.KafkaChannel) return err } } - if r.consumerGroupWatcher != nil { - r.consumerGroupWatcher.Forget(string(kc.ObjectMeta.UID)) - } + //TODO + //if r.consumerGroupWatcher != nil { + // r.consumerGroupWatcher.Forget(string(kc.ObjectMeta.UID)) + //} return newReconciledNormal(kc.Namespace, kc.Name) //ok to remove finalizer } diff --git a/pkg/channel/consolidated/status/status.go b/pkg/channel/consolidated/status/status.go index 7aea5ebd4d..521d9c3817 100644 --- a/pkg/channel/consolidated/status/status.go +++ b/pkg/channel/consolidated/status/status.go @@ -435,16 +435,27 @@ func (m *Prober) onProbingCancellation(subscriptionState *targetState, podState } func (m *Prober) probeVerifier(item *workItem) prober.Verifier { - return func(r *http.Response, _ []byte) (bool, error) { + return func(r *http.Response, b []byte) (bool, error) { //TODO Check if we need to use a hash switch r.StatusCode { case http.StatusOK: /** {"my-kafka-channel":["90713ffd-f527-42bf-b158-57630b68ebe2","a2041ec2-3295-4cd8-ac31-e699ab08273e","d3d70a79-8528-4df6-a812-3b559380cf08","db536b74-45f8-41cd-ab3e-7e3f60ed9e35","eb3aeee9-7cb5-4cad-b4c4-424e436dac9f"]} */ + m.logger.Debug("Verifying response") var subscriptions = make(map[string][]string) - json.NewDecoder(r.Body).Decode(subscriptions) + err := json.Unmarshal(b, &subscriptions) + if err != nil { + m.logger.Errorw("Error unmarshaling", zap.Error(err)) + return false, err + } + m.logger.Debugw("Got response", zap.Any("Response", b)) + m.logger.Debugw("Got list", zap.Any("Unmarshaled", subscriptions)) + uid := string(item.targetStates.sub.UID) + m.logger.Debugf("want %s", uid) + key := fmt.Sprintf("%s/%s", item.targetStates.ch.Namespace, item.targetStates.ch.Name) + if subs, ok := subscriptions[key]; ok && sets.NewString(subs...).Has(uid) { + return true, nil } else { //TODO return an error if the channel doesn't exist? diff --git a/pkg/common/consumer/consumer_handler.go b/pkg/common/consumer/consumer_handler.go index 092b3083fc..bd005701fb 100644 --- a/pkg/common/consumer/consumer_handler.go +++ b/pkg/common/consumer/consumer_handler.go @@ -67,7 +67,7 @@ func (consumer *SaramaConsumerHandler) Cleanup(session sarama.ConsumerGroupSessi // ConsumeClaim must start a consumer loop of ConsumerGroupClaim's Messages(). 
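// Note: with the change below, SetReady(true) runs as soon as the claim starts
// rather than only after the first message is marked, so a freshly rebalanced
// consumer reports ready even on an idle topic. The prober then sees the
// subscription listed for an idle channel as well; illustrative exchange
// (hypothetical names, UID from the sample payload above):
//
//	GET http://kafka-ch-dispatcher.knative-eventing/default/my-channel
//	=> {"default/my-channel":["90713ffd-f527-42bf-b158-57630b68ebe2"]}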
func (consumer *SaramaConsumerHandler) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error { consumer.logger.Info(fmt.Sprintf("Starting partition consumer, topic: %s, partition: %d, initialOffset: %d", claim.Topic(), claim.Partition(), claim.InitialOffset())) - + consumer.handler.SetReady(true) // NOTE: // Do not move the code below to a goroutine. // The `ConsumeClaim` itself is called within a goroutine, see: @@ -93,7 +93,6 @@ func (consumer *SaramaConsumerHandler) ConsumeClaim(session sarama.ConsumerGroup if ce := consumer.logger.Desugar().Check(zap.DebugLevel, "debugging"); ce != nil { consumer.logger.Debugw("Message marked", zap.String("topic", message.Topic), zap.Binary("value", message.Value)) } - consumer.handler.SetReady(true) } } diff --git a/third_party/VENDOR-LICENSE/knative.dev/networking/pkg/prober/LICENSE b/third_party/VENDOR-LICENSE/knative.dev/networking/pkg/prober/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/third_party/VENDOR-LICENSE/knative.dev/networking/pkg/prober/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/gogo/protobuf/proto/text_parser.go b/vendor/github.com/gogo/protobuf/proto/text_parser.go index 1ce0be2fa9..f85c0cc81a 100644 --- a/vendor/github.com/gogo/protobuf/proto/text_parser.go +++ b/vendor/github.com/gogo/protobuf/proto/text_parser.go @@ -318,7 +318,7 @@ func unescape(s string) (ch string, tail string, err error) { if i > utf8.MaxRune { return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) } - return string(i), s, nil + return string(rune(i)), s, nil } return "", "", fmt.Errorf(`unknown escape \%c`, r) } diff --git a/vendor/golang.org/x/tools/cmd/goimports/doc.go b/vendor/golang.org/x/tools/cmd/goimports/doc.go index 7033e4d4cf..f344d8014a 100644 --- a/vendor/golang.org/x/tools/cmd/goimports/doc.go +++ b/vendor/golang.org/x/tools/cmd/goimports/doc.go @@ -1,3 +1,7 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ /* Command goimports updates your Go import lines, diff --git a/vendor/golang.org/x/tools/go/ast/astutil/util.go b/vendor/golang.org/x/tools/go/ast/astutil/util.go index 7630629824..919d5305ab 100644 --- a/vendor/golang.org/x/tools/go/ast/astutil/util.go +++ b/vendor/golang.org/x/tools/go/ast/astutil/util.go @@ -1,3 +1,7 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + package astutil import "go/ast" diff --git a/vendor/golang.org/x/tools/imports/forward.go b/vendor/golang.org/x/tools/imports/forward.go index a4e40adba0..8be18a66b3 100644 --- a/vendor/golang.org/x/tools/imports/forward.go +++ b/vendor/golang.org/x/tools/imports/forward.go @@ -1,3 +1,7 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + // Package imports implements a Go pretty-printer (like package "go/format") // that also adds or removes import statements as necessary. package imports // import "golang.org/x/tools/imports" diff --git a/vendor/golang.org/x/tools/internal/gocommand/version.go b/vendor/golang.org/x/tools/internal/gocommand/version.go index 60d45ac0e6..0cebac6e66 100644 --- a/vendor/golang.org/x/tools/internal/gocommand/version.go +++ b/vendor/golang.org/x/tools/internal/gocommand/version.go @@ -16,9 +16,20 @@ func GoVersion(ctx context.Context, inv Invocation, r *Runner) (int, error) { inv.Verb = "list" inv.Args = []string{"-e", "-f", `{{context.ReleaseTags}}`} inv.Env = append(append([]string{}, inv.Env...), "GO111MODULE=off") - // Unset any unneeded flags. + // Unset any unneeded flags, and remove them from BuildFlags, if they're + // present. inv.ModFile = "" inv.ModFlag = "" + var buildFlags []string + for _, flag := range inv.BuildFlags { + // Flags can be prefixed by one or two dashes. + f := strings.TrimPrefix(strings.TrimPrefix(flag, "-"), "-") + if strings.HasPrefix(f, "mod=") || strings.HasPrefix(f, "modfile=") { + continue + } + buildFlags = append(buildFlags, flag) + } + inv.BuildFlags = buildFlags stdoutBytes, err := r.Run(ctx, inv) if err != nil { return 0, err diff --git a/vendor/golang.org/x/tools/internal/imports/mod.go b/vendor/golang.org/x/tools/internal/imports/mod.go index ce3269a430..901449a820 100644 --- a/vendor/golang.org/x/tools/internal/imports/mod.go +++ b/vendor/golang.org/x/tools/internal/imports/mod.go @@ -1,3 +1,7 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + package imports import ( diff --git a/vendor/golang.org/x/tools/internal/imports/mod_cache.go b/vendor/golang.org/x/tools/internal/imports/mod_cache.go index 5b4f03accd..18dada495c 100644 --- a/vendor/golang.org/x/tools/internal/imports/mod_cache.go +++ b/vendor/golang.org/x/tools/internal/imports/mod_cache.go @@ -1,3 +1,7 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + package imports import ( diff --git a/vendor/knative.dev/networking/LICENSE b/vendor/knative.dev/networking/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/knative.dev/networking/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. 
Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/knative.dev/networking/pkg/prober/doc.go b/vendor/knative.dev/networking/pkg/prober/doc.go new file mode 100644 index 0000000000..1c971e14c6 --- /dev/null +++ b/vendor/knative.dev/networking/pkg/prober/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package prober contains functionality for implementing probing in knative serving. +package prober diff --git a/vendor/knative.dev/networking/pkg/prober/prober.go b/vendor/knative.dev/networking/pkg/prober/prober.go new file mode 100644 index 0000000000..6b216609aa --- /dev/null +++ b/vendor/knative.dev/networking/pkg/prober/prober.go @@ -0,0 +1,199 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package prober + +import ( + "context" + "fmt" + "io/ioutil" + "net/http" + "sync" + "time" + + "go.uber.org/zap" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" + "knative.dev/pkg/logging" +) + +// Preparer is a way for the caller to modify the HTTP request before it goes out. +type Preparer func(r *http.Request) *http.Request + +// Verifier is a way for the caller to validate the HTTP response after it comes back. +type Verifier func(r *http.Response, b []byte) (bool, error) + +// WithHeader sets a header in the probe request. +func WithHeader(name, value string) Preparer { + return func(r *http.Request) *http.Request { + r.Header.Set(name, value) + return r + } +} + +// WithHost sets the host in the probe request. +func WithHost(host string) Preparer { + return func(r *http.Request) *http.Request { + r.Host = host + return r + } +} + +// WithPath sets the path in the probe request. +func WithPath(path string) Preparer { + return func(r *http.Request) *http.Request { + r.URL.Path = path + return r + } +} + +// ExpectsBody validates that the body of the probe response matches the provided string. +func ExpectsBody(body string) Verifier { + return func(r *http.Response, b []byte) (bool, error) { + if string(b) == body { + return true, nil + } + return false, fmt.Errorf("unexpected body: want %q, got %q", body, string(b)) + } +} + +// ExpectsHeader validates that the given header of the probe response matches the provided string. 
+func ExpectsHeader(name, value string) Verifier { + return func(r *http.Response, _ []byte) (bool, error) { + if r.Header.Get(name) == value { + return true, nil + } + return false, fmt.Errorf("unexpected header %q: want %q, got %q", name, value, r.Header.Get(name)) + } +} + +// ExpectsStatusCodes validates that the given status code of the probe response matches the provided int. +func ExpectsStatusCodes(statusCodes []int) Verifier { + return func(r *http.Response, _ []byte) (bool, error) { + for _, v := range statusCodes { + if r.StatusCode == v { + return true, nil + } + } + return false, fmt.Errorf("unexpected status code: want %v, got %v", statusCodes, r.StatusCode) + } +} + +// Do sends a single probe to given target, e.g. `http://revision.default.svc.cluster.local:81`. +// Do returns whether the probe was successful or not, or there was an error probing. +func Do(ctx context.Context, transport http.RoundTripper, target string, ops ...interface{}) (bool, error) { + req, err := http.NewRequestWithContext(ctx, http.MethodGet, target, nil) + if err != nil { + return false, fmt.Errorf("%s is not a valid URL: %w", target, err) + } + for _, op := range ops { + if po, ok := op.(Preparer); ok { + req = po(req) + } + } + + resp, err := transport.RoundTrip(req) + if err != nil { + return false, fmt.Errorf("error roundtripping %s: %w", target, err) + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return false, fmt.Errorf("error reading body: %w", err) + } + + for _, op := range ops { + if vo, ok := op.(Verifier); ok { + if ok, err := vo(resp, body); err != nil || !ok { + return false, err + } + } + } + return true, nil +} + +// Done is a callback that is executed when the async probe has finished. +// `arg` is given by the caller at the offering time, while `success` and `err` +// are the return values of the `Do` call. +// It is assumed that the opaque arg is consistent for a given target and +// we will coalesce concurrent Offer invocations on target. +type Done func(arg interface{}, success bool, err error) + +// Manager manages async probes and makes sure we run concurrently only a single +// probe for the same key. +type Manager struct { + cb Done + // NB: it is paramount to use a transport that will close the connection + // after every request here. Otherwise the cached connections will prohibit + // scaling to zero, due to unsuccessful probes to the Activator. + transport http.RoundTripper + + // mu guards keys. + mu sync.Mutex + keys sets.String +} + +// New creates a new Manager, that will invoke the given callback when +// async probing is finished. +func New(cb Done, transport http.RoundTripper) *Manager { + return &Manager{ + keys: sets.NewString(), + cb: cb, + transport: transport, + } +} + +// Offer executes asynchronous probe using `target` as the key. +// If a probe with the same key already exists, Offer will return false and the +// call is discarded. If the request is accepted, Offer returns true. +// Otherwise Offer starts a goroutine that periodically executes +// `Do`, until timeout is reached, the probe succeeds, or fails with an error. +// In the end the callback is invoked with the provided `arg` and probing results. +func (m *Manager) Offer(ctx context.Context, target string, arg interface{}, period, timeout time.Duration, ops ...interface{}) bool { + m.mu.Lock() + defer m.mu.Unlock() + if m.keys.Has(target) { + return false + } + m.keys.Insert(target) + m.doAsync(ctx, target, arg, period, timeout, ops...) 
+ return true +} + +// doAsync starts a go routine that probes the target with given period. +func (m *Manager) doAsync(ctx context.Context, target string, arg interface{}, period, timeout time.Duration, ops ...interface{}) { + logger := logging.FromContext(ctx) + go func() { + defer func() { + m.mu.Lock() + defer m.mu.Unlock() + m.keys.Delete(target) + }() + var ( + result bool + inErr error + ) + err := wait.PollImmediate(period, timeout, func() (bool, error) { + result, inErr = Do(ctx, m.transport, target, ops...) + // Do not return error, which is from verifierError, as retry is expected until timeout. + return result, nil + }) + if inErr != nil { + logger.Errorw("Unable to read sockstat", zap.Error(inErr)) + } + m.cb(arg, result, err) + }() +} diff --git a/vendor/modules.txt b/vendor/modules.txt index be865db6ea..316d8ff086 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -185,7 +185,7 @@ github.com/go-openapi/spec github.com/go-openapi/swag # github.com/gobuffalo/flect v0.2.2 github.com/gobuffalo/flect -# github.com/gogo/protobuf v1.3.1 +# github.com/gogo/protobuf v1.3.2 github.com/gogo/protobuf/proto github.com/gogo/protobuf/sortkeys # github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b @@ -396,6 +396,7 @@ go.opentelemetry.io/otel/label go.opentelemetry.io/otel/propagation go.opentelemetry.io/otel/trace # go.uber.org/atomic v1.7.0 +## explicit go.uber.org/atomic # go.uber.org/automaxprocs v1.4.0 go.uber.org/automaxprocs/internal/cgroups @@ -471,7 +472,7 @@ golang.org/x/text/width # golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e ## explicit golang.org/x/time/rate -# golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818 +# golang.org/x/tools v0.0.0-20210106214847-113979e3529a golang.org/x/tools/cmd/goimports golang.org/x/tools/go/ast/astutil golang.org/x/tools/go/gcexportdata @@ -1211,6 +1212,9 @@ knative.dev/eventing/test/test_images/recordevents # knative.dev/hack v0.0.0-20210305150220-f99a25560134 ## explicit knative.dev/hack +# knative.dev/networking v0.0.0-20210304153916-f813b5904943 +## explicit +knative.dev/networking/pkg/prober # knative.dev/pkg v0.0.0-20210309024624-0f8d8de5949d ## explicit knative.dev/pkg/apiextensions/storageversion From c69c0ef89202948ecf5412d476fbab109817f165 Mon Sep 17 00:00:00 2001 From: Ahmed Abdalla Date: Mon, 8 Mar 2021 12:11:42 +0100 Subject: [PATCH 03/17] Fix unittests after adding status prober Signed-off-by: Ahmed Abdalla --- .../reconciler/controller/kafkachannel.go | 2 +- .../controller/kafkachannel_test.go | 68 ++++++++++++++++--- pkg/channel/consolidated/status/status.go | 28 ++++---- 3 files changed, 73 insertions(+), 25 deletions(-) diff --git a/pkg/channel/consolidated/reconciler/controller/kafkachannel.go b/pkg/channel/consolidated/reconciler/controller/kafkachannel.go index 197b83631b..b7343d47bb 100644 --- a/pkg/channel/consolidated/reconciler/controller/kafkachannel.go +++ b/pkg/channel/consolidated/reconciler/controller/kafkachannel.go @@ -129,7 +129,7 @@ type Reconciler struct { endpointsLister corev1listers.EndpointsLister serviceAccountLister corev1listers.ServiceAccountLister roleBindingLister rbacv1listers.RoleBindingLister - statusManager *status.Prober + statusManager status.Manager } var ( diff --git a/pkg/channel/consolidated/reconciler/controller/kafkachannel_test.go b/pkg/channel/consolidated/reconciler/controller/kafkachannel_test.go index 0bccf4490d..e4be5cb019 100644 --- a/pkg/channel/consolidated/reconciler/controller/kafkachannel_test.go +++ 
b/pkg/channel/consolidated/reconciler/controller/kafkachannel_test.go @@ -20,7 +20,6 @@ import ( "context" "fmt" "testing" - "time" eventingduckv1 "knative.dev/eventing/pkg/apis/duck/v1" "knative.dev/pkg/apis" @@ -64,6 +63,7 @@ const ( finalizerName = "kafkachannels.messaging.knative.dev" sub1UID = "2f9b5e8e-deb6-11e8-9f32-f2801f1b9fd1" sub2UID = "34c5aec8-deb6-11e8-9f32-f2801f1b9fd1" + twoSubscribersPatch = `[{"op":"add","path":"/status/subscribers","value":[{"observedGeneration":1,"ready":"True","uid":"2f9b5e8e-deb6-11e8-9f32-f2801f1b9fd1"},{"observedGeneration":2,"ready":"True","uid":"34c5aec8-deb6-11e8-9f32-f2801f1b9fd1"}]}]` ) var ( @@ -263,6 +263,9 @@ func TestAllCases(t *testing.T) { WantEvents: []string{ Eventf(corev1.EventTypeNormal, "KafkaChannelReconciled", `KafkaChannel reconciled: "test-namespace/test-kc"`), }, + WantPatches: []clientgotesting.PatchActionImpl{ + makePatch(testNS, kcName, twoSubscribersPatch), + }, }, { Name: "channel exists, not owned by us", Key: kcKey, @@ -334,8 +337,7 @@ func TestAllCases(t *testing.T) { kafkaConfig: &KafkaConfig{ Brokers: []string{brokerName}, }, - consumerGroupWatcher: NewConsumerGroupWatcher(ctx, &FakeClusterAdmin{}, 100*time.Millisecond), - kafkachannelLister: listers.GetKafkaChannelLister(), + kafkachannelLister: listers.GetKafkaChannelLister(), // TODO fix kafkachannelInformer: nil, deploymentLister: listers.GetDeploymentLister(), @@ -345,6 +347,12 @@ func TestAllCases(t *testing.T) { kafkaClientSet: fakekafkaclient.Get(ctx), KubeClientSet: kubeclient.Get(ctx), EventingClientSet: eventingClient.Get(ctx), + statusManager: &fakeStatusManager{ + FakeIsReady: func(ctx context.Context, ch v1beta1.KafkaChannel, + sub eventingduckv1.SubscriberSpec) (bool, error) { + return true, nil + }, + }, } return kafkachannel.NewReconciler(ctx, logging.FromContext(ctx), r.kafkaClientSet, listers.GetKafkaChannelLister(), controller.GetEventRecorder(ctx), r) }, zap.L())) @@ -392,8 +400,7 @@ func TestTopicExists(t *testing.T) { kafkaConfig: &KafkaConfig{ Brokers: []string{brokerName}, }, - consumerGroupWatcher: NewConsumerGroupWatcher(ctx, &FakeClusterAdmin{}, 100*time.Millisecond), - kafkachannelLister: listers.GetKafkaChannelLister(), + kafkachannelLister: listers.GetKafkaChannelLister(), // TODO fix kafkachannelInformer: nil, deploymentLister: listers.GetDeploymentLister(), @@ -411,6 +418,12 @@ func TestTopicExists(t *testing.T) { kafkaClientSet: fakekafkaclient.Get(ctx), KubeClientSet: kubeclient.Get(ctx), EventingClientSet: eventingClient.Get(ctx), + statusManager: &fakeStatusManager{ + FakeIsReady: func(ctx context.Context, channel v1beta1.KafkaChannel, + spec eventingduckv1.SubscriberSpec) (bool, error) { + return true, nil + }, + }, } return kafkachannel.NewReconciler(ctx, logging.FromContext(ctx), r.kafkaClientSet, listers.GetKafkaChannelLister(), controller.GetEventRecorder(ctx), r) }, zap.L())) @@ -462,8 +475,7 @@ func TestDeploymentUpdatedOnImageChange(t *testing.T) { kafkaConfig: &KafkaConfig{ Brokers: []string{brokerName}, }, - consumerGroupWatcher: NewConsumerGroupWatcher(ctx, &FakeClusterAdmin{}, 100*time.Millisecond), - kafkachannelLister: listers.GetKafkaChannelLister(), + kafkachannelLister: listers.GetKafkaChannelLister(), // TODO fix kafkachannelInformer: nil, deploymentLister: listers.GetDeploymentLister(), @@ -481,6 +493,12 @@ func TestDeploymentUpdatedOnImageChange(t *testing.T) { kafkaClientSet: fakekafkaclient.Get(ctx), KubeClientSet: kubeclient.Get(ctx), EventingClientSet: eventingClient.Get(ctx), + statusManager: 
&fakeStatusManager{ + FakeIsReady: func(ctx context.Context, channel v1beta1.KafkaChannel, + spec eventingduckv1.SubscriberSpec) (bool, error) { + return true, nil + }, + }, } return kafkachannel.NewReconciler(ctx, logging.FromContext(ctx), r.kafkaClientSet, listers.GetKafkaChannelLister(), controller.GetEventRecorder(ctx), r) }, zap.L())) @@ -532,8 +550,7 @@ func TestDeploymentZeroReplicas(t *testing.T) { kafkaConfig: &KafkaConfig{ Brokers: []string{brokerName}, }, - consumerGroupWatcher: NewConsumerGroupWatcher(ctx, &FakeClusterAdmin{}, 100*time.Millisecond), - kafkachannelLister: listers.GetKafkaChannelLister(), + kafkachannelLister: listers.GetKafkaChannelLister(), // TODO fix kafkachannelInformer: nil, deploymentLister: listers.GetDeploymentLister(), @@ -551,6 +568,12 @@ func TestDeploymentZeroReplicas(t *testing.T) { kafkaClientSet: fakekafkaclient.Get(ctx), KubeClientSet: kubeclient.Get(ctx), EventingClientSet: eventingClient.Get(ctx), + statusManager: &fakeStatusManager{ + FakeIsReady: func(ctx context.Context, channel v1beta1.KafkaChannel, + spec eventingduckv1.SubscriberSpec) (bool, error) { + return true, nil + }, + }, } return kafkachannel.NewReconciler(ctx, logging.FromContext(ctx), r.kafkaClientSet, listers.GetKafkaChannelLister(), controller.GetEventRecorder(ctx), r) }, zap.L())) @@ -599,8 +622,7 @@ func TestDeploymentMoreThanOneReplicas(t *testing.T) { kafkaConfig: &KafkaConfig{ Brokers: []string{brokerName}, }, - consumerGroupWatcher: NewConsumerGroupWatcher(ctx, &FakeClusterAdmin{}, 100*time.Millisecond), - kafkachannelLister: listers.GetKafkaChannelLister(), + kafkachannelLister: listers.GetKafkaChannelLister(), // TODO fix kafkachannelInformer: nil, deploymentLister: listers.GetDeploymentLister(), @@ -618,6 +640,12 @@ func TestDeploymentMoreThanOneReplicas(t *testing.T) { kafkaClientSet: fakekafkaclient.Get(ctx), KubeClientSet: kubeclient.Get(ctx), EventingClientSet: eventingClient.Get(ctx), + statusManager: &fakeStatusManager{ + FakeIsReady: func(ctx context.Context, channel v1beta1.KafkaChannel, + spec eventingduckv1.SubscriberSpec) (bool, error) { + return true, nil + }, + }, } return kafkachannel.NewReconciler(ctx, logging.FromContext(ctx), r.kafkaClientSet, listers.GetKafkaChannelLister(), controller.GetEventRecorder(ctx), r) }, zap.L())) @@ -828,3 +856,21 @@ func subscribers() []eventingduckv1.SubscriberSpec { ReplyURI: apis.HTTP("sink2"), }} } + +type fakeStatusManager struct { + FakeIsReady func(context.Context, v1beta1.KafkaChannel, eventingduckv1.SubscriberSpec) (bool, error) +} + +func (m *fakeStatusManager) IsReady(ctx context.Context, ch v1beta1.KafkaChannel, sub eventingduckv1.SubscriberSpec) (bool, error) { + return m.FakeIsReady(ctx, ch, sub) +} + +func makePatch(namespace, name, patch string) clientgotesting.PatchActionImpl { + return clientgotesting.PatchActionImpl{ + ActionImpl: clientgotesting.ActionImpl{ + Namespace: namespace, + }, + Name: name, + Patch: []byte(patch), + } +} diff --git a/pkg/channel/consolidated/status/status.go b/pkg/channel/consolidated/status/status.go index 521d9c3817..c67a84d953 100644 --- a/pkg/channel/consolidated/status/status.go +++ b/pkg/channel/consolidated/status/status.go @@ -29,19 +29,16 @@ import ( "sync" "time" - v12 "knative.dev/eventing/pkg/apis/duck/v1" - - "knative.dev/eventing-kafka/pkg/apis/messaging/v1beta1" - "go.uber.org/atomic" "go.uber.org/zap" "golang.org/x/time/rate" - corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/util/workqueue" + 
messagingv1beta1 "knative.dev/eventing-kafka/pkg/apis/messaging/v1beta1" + eventingduckv1 "knative.dev/eventing/pkg/apis/duck/v1" "knative.dev/networking/pkg/prober" "knative.dev/pkg/logging" ) @@ -61,8 +58,8 @@ var dialContext = (&net.Dialer{Timeout: probeTimeout}).DialContext // targetState represents the probing state of a subscription type targetState struct { hash string - sub v12.SubscriberSpec - ch v1beta1.KafkaChannel + sub eventingduckv1.SubscriberSpec + ch messagingv1beta1.KafkaChannel // pendingCount is the number of pods that haven't been successfully probed yet pendingCount atomic.Int32 @@ -106,7 +103,12 @@ type ProbeTarget struct { // ProbeTargetLister lists all the targets that requires probing. type ProbeTargetLister interface { // ListProbeTargets returns a list of targets to be probed - ListProbeTargets(ctx context.Context, ch v1beta1.KafkaChannel) ([]ProbeTarget, error) + ListProbeTargets(ctx context.Context, ch messagingv1beta1.KafkaChannel) ([]ProbeTarget, error) +} + +// Manager provides a way to check if an Ingress is ready +type Manager interface { + IsReady(ctx context.Context, ch messagingv1beta1.KafkaChannel, sub eventingduckv1.SubscriberSpec) (bool, error) } // Prober provides a way to check if a VirtualService is ready by probing the Envoy pods @@ -123,7 +125,7 @@ type Prober struct { targetLister ProbeTargetLister - readyCallback func(v1beta1.KafkaChannel, v12.SubscriberSpec) + readyCallback func(messagingv1beta1.KafkaChannel, eventingduckv1.SubscriberSpec) probeConcurrency int @@ -134,7 +136,7 @@ type Prober struct { func NewProber( logger *zap.SugaredLogger, targetLister ProbeTargetLister, - readyCallback func(v1beta1.KafkaChannel, v12.SubscriberSpec), opts ...interface{}) *Prober { + readyCallback func(messagingv1beta1.KafkaChannel, eventingduckv1.SubscriberSpec), opts ...interface{}) *Prober { return &Prober{ logger: logger, targetStates: make(map[types.UID]*targetState), @@ -154,7 +156,7 @@ func NewProber( } } -func computeHash(sub v12.SubscriberSpec) ([sha256.Size]byte, error) { +func computeHash(sub eventingduckv1.SubscriberSpec) ([sha256.Size]byte, error) { bytes, err := json.Marshal(sub) if err != nil { return [sha256.Size]byte{}, fmt.Errorf("failed to serialize Subscription: %w", err) @@ -162,7 +164,7 @@ func computeHash(sub v12.SubscriberSpec) ([sha256.Size]byte, error) { return sha256.Sum256(bytes), nil } -func (m *Prober) IsReady(ctx context.Context, ch v1beta1.KafkaChannel, sub v12.SubscriberSpec) (bool, error) { +func (m *Prober) IsReady(ctx context.Context, ch messagingv1beta1.KafkaChannel, sub eventingduckv1.SubscriberSpec) (bool, error) { subscriptionKey := sub.UID logger := logging.FromContext(ctx) @@ -310,7 +312,7 @@ func (m *Prober) Start(done <-chan struct{}) chan struct{} { } // CancelProbing cancels probing of the provided Subscription -func (m *Prober) CancelProbing(sub v12.SubscriberSpec) { +func (m *Prober) CancelProbing(sub eventingduckv1.SubscriberSpec) { key := sub.UID m.mu.Lock() defer m.mu.Unlock() From 442fe5d14c16ffec370c839e4cff7d751bca81dd Mon Sep 17 00:00:00 2001 From: Ahmed Abdalla Date: Mon, 8 Mar 2021 13:00:05 +0100 Subject: [PATCH 04/17] Format and order go imports in cons. 
channel controller Signed-off-by: Ahmed Abdalla --- .../reconciler/controller/controller.go | 31 +++++++------------ .../reconciler/controller/kafkachannel.go | 8 ++--- .../controller/kafkachannel_test.go | 21 +++++-------- 3 files changed, 22 insertions(+), 38 deletions(-) diff --git a/pkg/channel/consolidated/reconciler/controller/controller.go b/pkg/channel/consolidated/reconciler/controller/controller.go index 1b0b9ff340..cfaf1b0013 100644 --- a/pkg/channel/consolidated/reconciler/controller/controller.go +++ b/pkg/channel/consolidated/reconciler/controller/controller.go @@ -21,41 +21,32 @@ import ( "fmt" "net/url" - v12 "knative.dev/eventing/pkg/apis/duck/v1" - - "knative.dev/eventing-kafka/pkg/channel/consolidated/status" - - "knative.dev/eventing/pkg/apis/eventing" - + "github.com/kelseyhightower/envconfig" + "go.uber.org/zap" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/sets" - corev1listers "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" - "github.com/kelseyhightower/envconfig" - "go.uber.org/zap" + "knative.dev/eventing-kafka/pkg/apis/messaging/v1beta1" + "knative.dev/eventing-kafka/pkg/channel/consolidated/status" + kafkaChannelClient "knative.dev/eventing-kafka/pkg/client/injection/client" + "knative.dev/eventing-kafka/pkg/client/injection/informers/messaging/v1beta1/kafkachannel" + kafkaChannelReconciler "knative.dev/eventing-kafka/pkg/client/injection/reconciler/messaging/v1beta1/kafkachannel" commonconfig "knative.dev/eventing-kafka/pkg/common/config" - - "k8s.io/client-go/tools/cache" + v12 "knative.dev/eventing/pkg/apis/duck/v1" + "knative.dev/eventing/pkg/apis/eventing" + eventingClient "knative.dev/eventing/pkg/client/injection/client" kubeclient "knative.dev/pkg/client/injection/kube/client" "knative.dev/pkg/client/injection/kube/informers/apps/v1/deployment" endpointsinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/endpoints" - "knative.dev/pkg/client/injection/kube/informers/core/v1/service" "knative.dev/pkg/client/injection/kube/informers/core/v1/serviceaccount" "knative.dev/pkg/client/injection/kube/informers/rbac/v1/rolebinding" - "knative.dev/pkg/configmap" "knative.dev/pkg/controller" "knative.dev/pkg/logging" "knative.dev/pkg/system" - - "knative.dev/eventing-kafka/pkg/apis/messaging/v1beta1" - kafkaChannelClient "knative.dev/eventing-kafka/pkg/client/injection/client" - "knative.dev/eventing-kafka/pkg/client/injection/informers/messaging/v1beta1/kafkachannel" - kafkaChannelReconciler "knative.dev/eventing-kafka/pkg/client/injection/reconciler/messaging/v1beta1/kafkachannel" - eventingClient "knative.dev/eventing/pkg/client/injection/client" ) type TargetLister struct { diff --git a/pkg/channel/consolidated/reconciler/controller/kafkachannel.go b/pkg/channel/consolidated/reconciler/controller/kafkachannel.go index b7343d47bb..33ec08d6bc 100644 --- a/pkg/channel/consolidated/reconciler/controller/kafkachannel.go +++ b/pkg/channel/consolidated/reconciler/controller/kafkachannel.go @@ -21,11 +21,6 @@ import ( "fmt" "time" - "knative.dev/eventing-kafka/pkg/channel/consolidated/status" - - "knative.dev/eventing-kafka/pkg/common/client" - "knative.dev/eventing-kafka/pkg/common/constants" - "github.com/Shopify/sarama" "go.uber.org/zap" appsv1 "k8s.io/api/apps/v1" @@ -45,11 +40,14 @@ import ( "knative.dev/eventing-kafka/pkg/apis/messaging/v1beta1" "knative.dev/eventing-kafka/pkg/channel/consolidated/reconciler/controller/resources" + "knative.dev/eventing-kafka/pkg/channel/consolidated/status" 
"knative.dev/eventing-kafka/pkg/channel/consolidated/utils" kafkaclientset "knative.dev/eventing-kafka/pkg/client/clientset/versioned" kafkaScheme "knative.dev/eventing-kafka/pkg/client/clientset/versioned/scheme" kafkaChannelReconciler "knative.dev/eventing-kafka/pkg/client/injection/reconciler/messaging/v1beta1/kafkachannel" listers "knative.dev/eventing-kafka/pkg/client/listers/messaging/v1beta1" + "knative.dev/eventing-kafka/pkg/common/client" + "knative.dev/eventing-kafka/pkg/common/constants" v1 "knative.dev/eventing/pkg/apis/duck/v1" "knative.dev/eventing/pkg/apis/eventing" eventingclientset "knative.dev/eventing/pkg/client/clientset/versioned" diff --git a/pkg/channel/consolidated/reconciler/controller/kafkachannel_test.go b/pkg/channel/consolidated/reconciler/controller/kafkachannel_test.go index e4be5cb019..a67aa6f650 100644 --- a/pkg/channel/consolidated/reconciler/controller/kafkachannel_test.go +++ b/pkg/channel/consolidated/reconciler/controller/kafkachannel_test.go @@ -21,13 +21,8 @@ import ( "fmt" "testing" - eventingduckv1 "knative.dev/eventing/pkg/apis/duck/v1" - "knative.dev/pkg/apis" - "github.com/Shopify/sarama" - "go.uber.org/zap" - appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -35,8 +30,15 @@ import ( "k8s.io/client-go/kubernetes/scheme" clientgotesting "k8s.io/client-go/testing" + "knative.dev/eventing-kafka/pkg/apis/messaging/v1beta1" + "knative.dev/eventing-kafka/pkg/channel/consolidated/reconciler/controller/resources" + reconcilertesting "knative.dev/eventing-kafka/pkg/channel/consolidated/reconciler/testing" + . "knative.dev/eventing-kafka/pkg/channel/consolidated/utils" + fakekafkaclient "knative.dev/eventing-kafka/pkg/client/injection/client/fake" + "knative.dev/eventing-kafka/pkg/client/injection/reconciler/messaging/v1beta1/kafkachannel" + eventingduckv1 "knative.dev/eventing/pkg/apis/duck/v1" eventingClient "knative.dev/eventing/pkg/client/injection/client" - + "knative.dev/pkg/apis" duckv1 "knative.dev/pkg/apis/duck/v1" kubeclient "knative.dev/pkg/client/injection/kube/client" "knative.dev/pkg/configmap" @@ -45,13 +47,6 @@ import ( "knative.dev/pkg/logging" "knative.dev/pkg/network" . "knative.dev/pkg/reconciler/testing" - - "knative.dev/eventing-kafka/pkg/apis/messaging/v1beta1" - "knative.dev/eventing-kafka/pkg/channel/consolidated/reconciler/controller/resources" - reconcilertesting "knative.dev/eventing-kafka/pkg/channel/consolidated/reconciler/testing" - . 
"knative.dev/eventing-kafka/pkg/channel/consolidated/utils" - fakekafkaclient "knative.dev/eventing-kafka/pkg/client/injection/client/fake" - "knative.dev/eventing-kafka/pkg/client/injection/reconciler/messaging/v1beta1/kafkachannel" ) const ( From 7fba64ce93fb2dbab7c5fa2ecf648758d9a79b52 Mon Sep 17 00:00:00 2001 From: Ahmed Abdalla Date: Mon, 8 Mar 2021 13:04:45 +0100 Subject: [PATCH 05/17] Rename import alias and remove unused variable Signed-off-by: Ahmed Abdalla --- pkg/channel/consolidated/reconciler/controller/controller.go | 4 ++-- .../consolidated/reconciler/controller/kafkachannel.go | 5 +---- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/pkg/channel/consolidated/reconciler/controller/controller.go b/pkg/channel/consolidated/reconciler/controller/controller.go index cfaf1b0013..ac04768712 100644 --- a/pkg/channel/consolidated/reconciler/controller/controller.go +++ b/pkg/channel/consolidated/reconciler/controller/controller.go @@ -34,7 +34,7 @@ import ( "knative.dev/eventing-kafka/pkg/client/injection/informers/messaging/v1beta1/kafkachannel" kafkaChannelReconciler "knative.dev/eventing-kafka/pkg/client/injection/reconciler/messaging/v1beta1/kafkachannel" commonconfig "knative.dev/eventing-kafka/pkg/common/config" - v12 "knative.dev/eventing/pkg/apis/duck/v1" + eventingduckv1 "knative.dev/eventing/pkg/apis/duck/v1" "knative.dev/eventing/pkg/apis/eventing" eventingClient "knative.dev/eventing/pkg/client/injection/client" kubeclient "knative.dev/pkg/client/injection/kube/client" @@ -138,7 +138,7 @@ func NewController( statusProber := status.NewProber( logger.Named("status-manager"), NewProbeTargetLister(logger, endpointsInformer.Lister()), - func(c v1beta1.KafkaChannel, s v12.SubscriberSpec) { + func(c v1beta1.KafkaChannel, s eventingduckv1.SubscriberSpec) { logger.Debugf("Ready callback triggered for channel: %s/%s subscription: %s", c.Namespace, c.Name, string(s.UID)) impl.EnqueueKey(types.NamespacedName{Namespace: c.Namespace, Name: c.Name}) }, diff --git a/pkg/channel/consolidated/reconciler/controller/kafkachannel.go b/pkg/channel/consolidated/reconciler/controller/kafkachannel.go index 33ec08d6bc..6a14d4e9fa 100644 --- a/pkg/channel/consolidated/reconciler/controller/kafkachannel.go +++ b/pkg/channel/consolidated/reconciler/controller/kafkachannel.go @@ -19,7 +19,6 @@ package controller import ( "context" "fmt" - "time" "github.com/Shopify/sarama" "go.uber.org/zap" @@ -74,8 +73,6 @@ const ( dispatcherRoleBindingCreated = "DispatcherRoleBindingCreated" dispatcherName = "kafka-ch-dispatcher" - - pollInterval = 2 * time.Second ) func newReconciledNormal(namespace, name string) pkgreconciler.Event { @@ -91,7 +88,7 @@ func newDispatcherServiceWarn(err error) pkgreconciler.Event { } func newServiceAccountWarn(err error) pkgreconciler.Event { - return pkgreconciler.NewEvent(corev1.EventTypeWarning, "Dispatc erServiceAccountFailed", "Reconciling dispatcher ServiceAccount failed: %s", err) + return pkgreconciler.NewEvent(corev1.EventTypeWarning, "DispatcherServiceAccountFailed", "Reconciling dispatcher ServiceAccount failed: %s", err) } func newRoleBindingWarn(err error) pkgreconciler.Event { From 32e4b66a73365ca089c083527a0ab1992f1a1673 Mon Sep 17 00:00:00 2001 From: Ahmed Abdalla Date: Thu, 11 Mar 2021 10:31:27 +0100 Subject: [PATCH 06/17] Add dispatcher prober test for tesitng a single pod Signed-off-by: Ahmed Abdalla --- .../consolidated/status/status_test.go | 224 ++++++++++++++++++ 1 file changed, 224 insertions(+) create mode 100644 
diff --git a/pkg/channel/consolidated/status/status_test.go b/pkg/channel/consolidated/status/status_test.go
new file mode 100644
index 0000000000..cd72f7263a
--- /dev/null
+++ b/pkg/channel/consolidated/status/status_test.go
@@ -0,0 +1,224 @@
+/*
+Copyright 2019 The Knative Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package status
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/http"
+	"net/http/httptest"
+	"net/url"
+	"path"
+	"strconv"
+	"testing"
+	"time"
+
+	"go.uber.org/atomic"
+
+	"knative.dev/pkg/apis"
+
+	"k8s.io/apimachinery/pkg/types"
+
+	"go.uber.org/zap/zaptest"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/sets"
+
+	"knative.dev/eventing-kafka/pkg/apis/messaging/v1beta1"
+	messagingv1beta1 "knative.dev/eventing-kafka/pkg/apis/messaging/v1beta1"
+	eventingduckv1 "knative.dev/eventing/pkg/apis/duck/v1"
+)
+
+var (
+	channelTemplate = &v1beta1.KafkaChannel{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: "default",
+			Name:      "chan4prober",
+		},
+		Spec: v1beta1.KafkaChannelSpec{
+			NumPartitions:     1,
+			ReplicationFactor: 1,
+		},
+	}
+	subscriptionTemplate = eventingduckv1.SubscriberSpec{
+		UID:           types.UID("90713ffd-f527-42bf-b158-57630b68ebe2"),
+		Generation:    1,
+		SubscriberURI: getURL("http://subscr.ns.local"),
+	}
+)
+
+const dispatcherReadySubHeader = "K-Subscriber-Status"
+
+func getURL(s string) *apis.URL {
+	u, _ := apis.ParseURL(s)
+	return u
+}
+
+func handleProbe(t *testing.T) func(http.ResponseWriter, *http.Request) {
+	return func(w http.ResponseWriter, r *http.Request) {
+		channelRefName := channelTemplate.ObjectMeta.Name
+		channelRefNamespace := channelTemplate.ObjectMeta.Namespace
+		mapKeyName := fmt.Sprintf("%s/%s", channelRefNamespace,
+			channelRefName)
+		var subscriptions = map[string][]string{
+			mapKeyName: {
+				string(subscriptionTemplate.UID),
+			},
+		}
+		w.Header().Set(dispatcherReadySubHeader, channelRefName)
+		jsonResult, err := json.Marshal(subscriptions)
+		if err != nil {
+			t.Fatalf("Error marshalling json for sub-status channelref: %s/%s, %v", channelRefNamespace, channelRefName, err)
+		}
+		_, err = w.Write(jsonResult)
+		if err != nil {
+			t.Fatalf("Error writing jsonResult to serveHTTP writer: %v", err)
+		}
+	}
+}
+
+type ReadyPair struct {
+	c v1beta1.KafkaChannel
+	s eventingduckv1.SubscriberSpec
+}
+
+const HashHeaderName = "K-Network-Hash"
+
+func TestProbeSinglePod(t *testing.T) {
+	var succeed atomic.Bool
+
+	ch := channelTemplate.DeepCopy()
+	sub := subscriptionTemplate.DeepCopy()
+
+	hash, err := computeHash(*sub.DeepCopy())
+	if err != nil {
+		t.Fatal("Failed to compute hash:", err)
+	}
+
+	probeHandler := http.HandlerFunc(handleProbe(t))
+
+	// Probes only succeed if succeed is true
+	probeRequests := make(chan *http.Request)
+	finalHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		probeRequests <- r
+		if !succeed.Load() {
+			w.WriteHeader(http.StatusNotFound)
+			return
+		}
+
+		// TODO Move const to dispatcher
+		r.Header.Set(HashHeaderName, fmt.Sprintf("%x", hash))
+		probeHandler.ServeHTTP(w, r)
+	})
+
+	ts := httptest.NewServer(finalHandler)
+	defer ts.Close()
+	tsURL, err := url.Parse(ts.URL)
+	if err != nil {
+		t.Fatalf("Failed to parse URL %q: %v", ts.URL, err)
+	}
+	port, err := strconv.Atoi(tsURL.Port())
+	if err != nil {
+		t.Fatalf("Failed to parse port %q: %v", tsURL.Port(), err)
+	}
+	hostname := tsURL.Hostname()
+
+	ready := make(chan *ReadyPair)
+
+	prober := NewProber(
+		zaptest.NewLogger(t).Sugar(),
+		fakeProbeTargetLister{{
+			PodIPs:  sets.NewString(hostname),
+			PodPort: strconv.Itoa(port),
+			URLs:    []*url.URL{tsURL},
+		}},
+		func(c v1beta1.KafkaChannel, s eventingduckv1.SubscriberSpec) {
+			ready <- &ReadyPair{
+				c,
+				s,
+			}
+		})
+
+	done := make(chan struct{})
+	cancelled := prober.Start(done)
+	defer func() {
+		close(done)
+		<-cancelled
+	}()
+
+	// The first call to IsReady must succeed and return false
+	ok, err := prober.IsReady(context.Background(), *ch, *sub)
+	if err != nil {
+		t.Fatal("IsReady failed:", err)
+	}
+	if ok {
+		t.Fatal("IsReady() returned true")
+	}
+
+	select {
+	case <-ready:
+		// Since succeed is still false and we don't return 200, the prober shouldn't be ready
+		t.Fatal("Prober shouldn't be ready")
+	case <-time.After(1 * time.Second):
+		// Not ideal but it gives time to the prober to write to ready
+		break
+	}
+
+	// Make probes succeed
+	succeed.Store(true)
+
+	// Just drain the requests in the channel to not block the handler
+	go func() {
+		for range probeRequests {
+		}
+	}()
+
+	select {
+	case <-ready:
+		// Wait for the probing to eventually succeed
+	case <-time.After(5 * time.Second):
+		t.Error("Timed out waiting for probing to succeed.")
+	}
+}
+
+type fakeProbeTargetLister []ProbeTarget
+
+func (l fakeProbeTargetLister) ListProbeTargets(ctx context.Context, kc messagingv1beta1.KafkaChannel) ([]ProbeTarget, error) {
+	targets := []ProbeTarget{}
+	for _, target := range l {
+		newTarget := ProbeTarget{
+			PodIPs:  target.PodIPs,
+			PodPort: target.PodPort,
+			Port:    target.Port,
+		}
+
+		for _, u := range target.URLs {
+			newURL := *u
+			newURL.Path = path.Join(newURL.Path, kc.Namespace, kc.Name)
+			newTarget.URLs = append(newTarget.URLs, &newURL)
+		}
+		targets = append(targets, newTarget)
+	}
+	return targets, nil
+}
+
+type notFoundLister struct{}
+
+func (l notFoundLister) ListProbeTargets(ctx context.Context, obj interface{}) ([]ProbeTarget, error) {
+	return nil, errors.New("not found")
+}

From fcfa3b414115a3722c22060aacf84cd1297c1c69 Mon Sep 17 00:00:00 2001
From: Ahmed Abdalla
Date: Thu, 11 Mar 2021 10:57:43 +0100
Subject: [PATCH 07/17] Support probing dispatchers for multi-partition kafka
 channels

Signed-off-by: Ahmed Abdalla
---
 .../consolidated/deployments/dispatcher.yaml  |  2 +
 .../roles/controller-clusterrole.yaml         | 16 +---
 .../dispatcher/consumer_message_handler.go    |  5 ++
 .../consolidated/dispatcher/dispatcher.go     | 19 ++--
 .../dispatcher/dispatcher_test.go             |  6 +-
 .../dispatcher/kafka_subscription.go          |  9 +-
 .../reconciler/controller/controller.go       | 86 +++++++------------
 .../reconciler/controller/kafkachannel.go     | 10 +--
 .../reconciler/controller/lister.go           | 84 ++++++++++++++++++
 pkg/channel/consolidated/status/status.go     | 86 +++++++++++++------
 .../consolidated/status/status_test.go        | 36 ++------
 pkg/common/consumer/consumer_handler.go       |  3 +-
 pkg/source/adapter/adapter.go                 |  3 +
 .../google/mako/go/quickstore/BUILD           | 24 ++++++
 .../google/mako/internal/go/common/BUILD      | 14 +++
 .../grpc-gateway/internal/BUILD.bazel         | 23 +++
.../grpc-gateway/runtime/BUILD.bazel | 85 ++++++++++++++++++ .../grpc-gateway/utilities/BUILD.bazel | 21 +++++ .../k8s.io/apimachinery/pkg/api/errors/OWNERS | 23 +++++ .../k8s.io/apimachinery/pkg/api/meta/OWNERS | 21 +++++ .../apimachinery/pkg/api/resource/OWNERS | 13 +++ .../apimachinery/pkg/apis/meta/v1/OWNERS | 31 +++++++ .../apimachinery/pkg/util/mergepatch/OWNERS | 7 ++ .../pkg/util/strategicpatch/OWNERS | 8 ++ .../third_party/forked/golang/json/OWNERS | 7 ++ .../pkg/apis/clientauthentication/OWNERS | 9 ++ .../plugin/pkg/client/auth/gcp/OWNERS | 8 ++ .../plugin/pkg/client/auth/oidc/OWNERS | 7 ++ vendor/k8s.io/client-go/rest/OWNERS | 25 ++++++ vendor/k8s.io/client-go/tools/auth/OWNERS | 9 ++ vendor/k8s.io/client-go/tools/cache/OWNERS | 43 ++++++++++ .../client-go/tools/leaderelection/OWNERS | 13 +++ vendor/k8s.io/client-go/tools/metrics/OWNERS | 6 ++ vendor/k8s.io/client-go/tools/record/OWNERS | 28 ++++++ vendor/k8s.io/client-go/transport/OWNERS | 9 ++ vendor/k8s.io/client-go/util/cert/OWNERS | 9 ++ vendor/k8s.io/client-go/util/keyutil/OWNERS | 7 ++ vendor/k8s.io/client-go/util/retry/OWNERS | 4 + vendor/k8s.io/code-generator/OWNERS | 13 +++ .../code-generator/cmd/client-gen/OWNERS | 10 +++ .../code-generator/cmd/go-to-protobuf/OWNERS | 6 ++ vendor/k8s.io/klog/OWNERS | 19 ++++ vendor/k8s.io/klog/v2/OWNERS | 19 ++++ .../kube-openapi/pkg/generators/rules/OWNERS | 4 + .../k8s.io/kube-openapi/pkg/util/proto/OWNERS | 2 + vendor/k8s.io/utils/pointer/OWNERS | 10 +++ .../knative.dev/eventing/pkg/channel/OWNERS | 7 ++ vendor/knative.dev/eventing/test/OWNERS | 10 +++ .../eventing/test/performance/infra/OWNERS | 10 +++ .../test/test_images/performance/OWNERS | 10 +++ vendor/knative.dev/hack/OWNERS | 7 ++ vendor/knative.dev/hack/OWNERS_ALIASES | 16 ++++ vendor/knative.dev/pkg/apis/OWNERS | 7 ++ vendor/knative.dev/pkg/apis/duck/OWNERS | 7 ++ .../kube/informers/core/v1/pod/pod.go | 52 +++++++++++ vendor/knative.dev/pkg/configmap/OWNERS | 4 + vendor/knative.dev/pkg/controller/OWNERS | 7 ++ vendor/knative.dev/pkg/hack/OWNERS | 10 +++ vendor/knative.dev/pkg/hash/OWNERS | 7 ++ vendor/knative.dev/pkg/injection/OWNERS | 5 ++ vendor/knative.dev/pkg/kmeta/OWNERS | 4 + vendor/knative.dev/pkg/logging/OWNERS | 4 + vendor/knative.dev/pkg/metrics/OWNERS | 4 + vendor/knative.dev/pkg/network/OWNERS | 5 ++ vendor/knative.dev/pkg/reconciler/OWNERS | 7 ++ vendor/knative.dev/pkg/resolver/OWNERS | 5 ++ vendor/knative.dev/pkg/source/OWNERS | 4 + vendor/knative.dev/pkg/test/OWNERS | 10 +++ vendor/knative.dev/pkg/test/mako/OWNERS | 5 ++ vendor/knative.dev/pkg/tracing/config/OWNERS | 4 + vendor/knative.dev/pkg/webhook/OWNERS | 7 ++ vendor/modules.txt | 1 + vendor/sigs.k8s.io/yaml/OWNERS | 27 ++++++ 73 files changed, 1009 insertions(+), 139 deletions(-) create mode 100644 pkg/channel/consolidated/reconciler/controller/lister.go create mode 100644 vendor/github.com/google/mako/go/quickstore/BUILD create mode 100644 vendor/github.com/google/mako/internal/go/common/BUILD create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel create mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel create mode 100644 vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS create mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS create mode 100644 vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS create mode 100644 
vendor/k8s.io/apimachinery/pkg/util/mergepatch/OWNERS create mode 100644 vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS create mode 100644 vendor/k8s.io/apimachinery/third_party/forked/golang/json/OWNERS create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS create mode 100644 vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/OWNERS create mode 100644 vendor/k8s.io/client-go/plugin/pkg/client/auth/oidc/OWNERS create mode 100644 vendor/k8s.io/client-go/rest/OWNERS create mode 100644 vendor/k8s.io/client-go/tools/auth/OWNERS create mode 100644 vendor/k8s.io/client-go/tools/cache/OWNERS create mode 100644 vendor/k8s.io/client-go/tools/leaderelection/OWNERS create mode 100644 vendor/k8s.io/client-go/tools/metrics/OWNERS create mode 100644 vendor/k8s.io/client-go/tools/record/OWNERS create mode 100644 vendor/k8s.io/client-go/transport/OWNERS create mode 100644 vendor/k8s.io/client-go/util/cert/OWNERS create mode 100644 vendor/k8s.io/client-go/util/keyutil/OWNERS create mode 100644 vendor/k8s.io/client-go/util/retry/OWNERS create mode 100644 vendor/k8s.io/code-generator/OWNERS create mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/OWNERS create mode 100644 vendor/k8s.io/code-generator/cmd/go-to-protobuf/OWNERS create mode 100644 vendor/k8s.io/klog/OWNERS create mode 100644 vendor/k8s.io/klog/v2/OWNERS create mode 100644 vendor/k8s.io/kube-openapi/pkg/generators/rules/OWNERS create mode 100644 vendor/k8s.io/kube-openapi/pkg/util/proto/OWNERS create mode 100644 vendor/k8s.io/utils/pointer/OWNERS create mode 100644 vendor/knative.dev/eventing/pkg/channel/OWNERS create mode 100644 vendor/knative.dev/eventing/test/OWNERS create mode 100644 vendor/knative.dev/eventing/test/performance/infra/OWNERS create mode 100644 vendor/knative.dev/eventing/test/test_images/performance/OWNERS create mode 100644 vendor/knative.dev/hack/OWNERS create mode 100644 vendor/knative.dev/hack/OWNERS_ALIASES create mode 100644 vendor/knative.dev/pkg/apis/OWNERS create mode 100644 vendor/knative.dev/pkg/apis/duck/OWNERS create mode 100644 vendor/knative.dev/pkg/client/injection/kube/informers/core/v1/pod/pod.go create mode 100644 vendor/knative.dev/pkg/configmap/OWNERS create mode 100644 vendor/knative.dev/pkg/controller/OWNERS create mode 100644 vendor/knative.dev/pkg/hack/OWNERS create mode 100644 vendor/knative.dev/pkg/hash/OWNERS create mode 100644 vendor/knative.dev/pkg/injection/OWNERS create mode 100644 vendor/knative.dev/pkg/kmeta/OWNERS create mode 100644 vendor/knative.dev/pkg/logging/OWNERS create mode 100644 vendor/knative.dev/pkg/metrics/OWNERS create mode 100644 vendor/knative.dev/pkg/network/OWNERS create mode 100644 vendor/knative.dev/pkg/reconciler/OWNERS create mode 100644 vendor/knative.dev/pkg/resolver/OWNERS create mode 100644 vendor/knative.dev/pkg/source/OWNERS create mode 100644 vendor/knative.dev/pkg/test/OWNERS create mode 100644 vendor/knative.dev/pkg/test/mako/OWNERS create mode 100644 vendor/knative.dev/pkg/tracing/config/OWNERS create mode 100644 vendor/knative.dev/pkg/webhook/OWNERS create mode 100644 vendor/sigs.k8s.io/yaml/OWNERS diff --git a/config/channel/consolidated/deployments/dispatcher.yaml b/config/channel/consolidated/deployments/dispatcher.yaml index 896f9ea9e0..c168904a50 100644 --- a/config/channel/consolidated/deployments/dispatcher.yaml +++ b/config/channel/consolidated/deployments/dispatcher.yaml @@ -30,7 +30,9 @@ spec: template: metadata: labels: + # Do not change. Used by the controller for probing. 
messaging.knative.dev/channel: kafka-channel + # Do not change. Used by the controller for probing. messaging.knative.dev/role: dispatcher kafka.eventing.knative.dev/release: devel spec: diff --git a/config/channel/consolidated/roles/controller-clusterrole.yaml b/config/channel/consolidated/roles/controller-clusterrole.yaml index 42cacb2b40..dc46e7034b 100644 --- a/config/channel/consolidated/roles/controller-clusterrole.yaml +++ b/config/channel/consolidated/roles/controller-clusterrole.yaml @@ -39,7 +39,6 @@ rules: - apiGroups: - "" # Core API group. resources: - - services - configmaps - secrets verbs: @@ -51,6 +50,7 @@ rules: - "" # Core API group. resources: - services + - serviceaccounts verbs: &everything - get - list @@ -74,18 +74,11 @@ rules: - create - patch - update - - apiGroups: - - "" - resources: - - configmaps - verbs: - - get - - list - - watch - apiGroups: - "" # Core API group. resources: - endpoints + - pods verbs: - get - list @@ -96,11 +89,6 @@ rules: - deployments - deployments/status verbs: *everything - - apiGroups: - - "" # Core API group. - resources: - - serviceaccounts - verbs: *everything - apiGroups: - rbac.authorization.k8s.io resources: diff --git a/pkg/channel/consolidated/dispatcher/consumer_message_handler.go b/pkg/channel/consolidated/dispatcher/consumer_message_handler.go index 0d08deb96f..7bc8dcadce 100644 --- a/pkg/channel/consolidated/dispatcher/consumer_message_handler.go +++ b/pkg/channel/consolidated/dispatcher/consumer_message_handler.go @@ -34,10 +34,15 @@ type consumerMessageHandler struct { sub Subscription dispatcher *eventingchannels.MessageDispatcherImpl kafkaSubscription *KafkaSubscription + consumerGroup string } var _ consumer.KafkaConsumerHandler = (*consumerMessageHandler)(nil) +func (c consumerMessageHandler) GetConsumerGroup() string { + return c.consumerGroup +} + func (c consumerMessageHandler) SetReady(ready bool) { c.kafkaSubscription.SetReady(c.sub.UID, ready) } diff --git a/pkg/channel/consolidated/dispatcher/dispatcher.go b/pkg/channel/consolidated/dispatcher/dispatcher.go index cc47515263..d72c0a61d9 100644 --- a/pkg/channel/consolidated/dispatcher/dispatcher.go +++ b/pkg/channel/consolidated/dispatcher/dispatcher.go @@ -234,6 +234,7 @@ func (d *KafkaDispatcher) UpdateKafkaConsumers(config *Config) (map[types.UID]er } } else { //ensure the pointer is populated or things go boom d.channelSubscriptions[channelRef] = &KafkaSubscription{ + logger: d.logger, subs: []types.UID{}, channelReadySubscriptions: sets.String{}, } @@ -291,13 +292,18 @@ func (d *KafkaDispatcher) UpdateHostToChannelMap(config *Config) error { // subscribe reads kafkaConsumers which gets updated in UpdateConfig in a separate go-routine. // subscribe must be called under updateLock. 
func (d *KafkaDispatcher) subscribe(channelRef eventingchannels.ChannelReference, sub Subscription) error { - d.logger.Info("Subscribing", zap.Any("channelRef", channelRef), zap.Any("subscription", sub.UID)) - + d.logger.Infow("Subscribing to Kafka Channel", zap.Any("channelRef", channelRef), zap.Any("subscription", sub.UID)) topicName := d.topicFunc(utils.KafkaChannelSeparator, channelRef.Namespace, channelRef.Name) groupID := fmt.Sprintf("kafka.%s.%s.%s", channelRef.Namespace, channelRef.Name, string(sub.UID)) - - handler := &consumerMessageHandler{d.logger, sub, d.dispatcher, d.channelSubscriptions[channelRef]} - + handler := &consumerMessageHandler{ + d.logger, + sub, + d.dispatcher, + d.channelSubscriptions[channelRef], + groupID, + } + d.logger.Debugw("Starting consumer group", zap.Any("channelRef", channelRef), + zap.Any("subscription", sub.UID), zap.String("topic", topicName), zap.String("consumer group", groupID)) consumerGroup, err := d.kafkaConsumerFactory.StartConsumerGroup(groupID, []string{topicName}, d.logger, handler) if err != nil { @@ -324,7 +330,7 @@ func (d *KafkaDispatcher) subscribe(channelRef eventingchannels.ChannelReference // unsubscribe reads kafkaConsumers which gets updated in UpdateConfig in a separate go-routine. // unsubscribe must be called under updateLock. func (d *KafkaDispatcher) unsubscribe(channel eventingchannels.ChannelReference, sub Subscription) error { - d.logger.Infow("Unsubscribing from channel", zap.Any("channel", channel), zap.String("subscription", sub.String())) + d.logger.Infow("Unsubscribing from channel", zap.Any("channel", channel), zap.Any("subscription", sub.UID)) delete(d.subscriptions, sub.UID) if _, ok := d.channelSubscriptions[channel]; !ok { return nil @@ -340,6 +346,7 @@ func (d *KafkaDispatcher) unsubscribe(channel eventingchannels.ChannelReference, } if consumer, ok := d.subsConsumerGroups[sub.UID]; ok { delete(d.subsConsumerGroups, sub.UID) + d.logger.Debugw("Closing cached consumer group", zap.Any("consumer group", consumer)) return consumer.Close() } return nil diff --git a/pkg/channel/consolidated/dispatcher/dispatcher_test.go b/pkg/channel/consolidated/dispatcher/dispatcher_test.go index 50375af28f..129927f6cc 100644 --- a/pkg/channel/consolidated/dispatcher/dispatcher_test.go +++ b/pkg/channel/consolidated/dispatcher/dispatcher_test.go @@ -33,10 +33,12 @@ import ( "go.uber.org/zap/zaptest" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" + "knative.dev/eventing-kafka/pkg/channel/consolidated/utils" "knative.dev/eventing-kafka/pkg/common/consumer" eventingchannels "knative.dev/eventing/pkg/channel" "knative.dev/eventing/pkg/channel/fanout" + klogtesting "knative.dev/pkg/logging/testing" _ "knative.dev/pkg/system/testing" ) @@ -444,6 +446,7 @@ func TestNewDispatcher(t *testing.T) { } func TestSetReady(t *testing.T) { + logger := klogtesting.TestLogger(t) testCases := []struct { name string ready bool @@ -500,6 +503,7 @@ func TestSetReady(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { t.Logf("Running %s", t.Name()) + tc.originalKafkaSub.logger = logger tc.originalKafkaSub.SetReady(tc.subID, tc.ready) if diff := cmp.Diff(tc.desiredKafkaSub.channelReadySubscriptions, tc.originalKafkaSub.channelReadySubscriptions); diff != "" { t.Errorf("unexpected ChannelReadySubscription (-want, +got) = %v", diff) @@ -599,7 +603,7 @@ func TestServeHTTP(t *testing.T) { } d := &KafkaDispatcher{ channelSubscriptions: make(map[eventingchannels.ChannelReference]*KafkaSubscription), - logger: 
zaptest.NewLogger(t).Sugar(), + logger: klogtesting.TestLogger(t), } ts := httptest.NewServer(d) defer ts.Close() diff --git a/pkg/channel/consolidated/dispatcher/kafka_subscription.go b/pkg/channel/consolidated/dispatcher/kafka_subscription.go index ab240c8c9d..34a4977f7f 100644 --- a/pkg/channel/consolidated/dispatcher/kafka_subscription.go +++ b/pkg/channel/consolidated/dispatcher/kafka_subscription.go @@ -19,13 +19,15 @@ package dispatcher import ( "sync" + "go.uber.org/zap" + "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" ) type KafkaSubscription struct { - subs []types.UID - + logger *zap.SugaredLogger + subs []types.UID // readySubscriptionsLock must be used to synchronize access to channelReadySubscriptions readySubscriptionsLock sync.RWMutex channelReadySubscriptions sets.String @@ -33,14 +35,17 @@ type KafkaSubscription struct { // SetReady will mark the subid in the KafkaSubscription and call any registered callbacks func (ks *KafkaSubscription) SetReady(subID types.UID, ready bool) { + ks.logger.Debugw("Setting subscription readiness", zap.Any("subscription", subID), zap.Bool("ready", ready)) ks.readySubscriptionsLock.Lock() defer ks.readySubscriptionsLock.Unlock() if ready { if !ks.channelReadySubscriptions.Has(string(subID)) { + ks.logger.Debugw("Caching ready subscription", zap.Any("subscription", subID)) ks.channelReadySubscriptions.Insert(string(subID)) } } else { if ks.channelReadySubscriptions.Has(string(subID)) { + ks.logger.Debugw("Ejecting cached ready subscription", zap.Any("subscription", subID)) ks.channelReadySubscriptions.Delete(string(subID)) } } diff --git a/pkg/channel/consolidated/reconciler/controller/controller.go b/pkg/channel/consolidated/reconciler/controller/controller.go index ac04768712..55f4ed197e 100644 --- a/pkg/channel/consolidated/reconciler/controller/controller.go +++ b/pkg/channel/consolidated/reconciler/controller/controller.go @@ -18,14 +18,13 @@ package controller import ( "context" - "fmt" - "net/url" + + corev1 "k8s.io/api/core/v1" + knativeReconciler "knative.dev/pkg/reconciler" "github.com/kelseyhightower/envconfig" "go.uber.org/zap" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/sets" - corev1listers "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" "knative.dev/eventing-kafka/pkg/apis/messaging/v1beta1" @@ -35,11 +34,11 @@ import ( kafkaChannelReconciler "knative.dev/eventing-kafka/pkg/client/injection/reconciler/messaging/v1beta1/kafkachannel" commonconfig "knative.dev/eventing-kafka/pkg/common/config" eventingduckv1 "knative.dev/eventing/pkg/apis/duck/v1" - "knative.dev/eventing/pkg/apis/eventing" eventingClient "knative.dev/eventing/pkg/client/injection/client" kubeclient "knative.dev/pkg/client/injection/kube/client" "knative.dev/pkg/client/injection/kube/informers/apps/v1/deployment" endpointsinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/endpoints" + podinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/pod" "knative.dev/pkg/client/injection/kube/informers/core/v1/service" "knative.dev/pkg/client/injection/kube/informers/core/v1/serviceaccount" "knative.dev/pkg/client/injection/kube/informers/rbac/v1/rolebinding" @@ -49,50 +48,12 @@ import ( "knative.dev/pkg/system" ) -type TargetLister struct { - endpointLister corev1listers.EndpointsLister -} - -func (t *TargetLister) ListProbeTargets(ctx context.Context, kc v1beta1.KafkaChannel) ([]status.ProbeTarget, error) { - scope, ok := kc.Annotations[eventing.ScopeAnnotationKey] - if !ok { - scope = 
scopeCluster - } - - dispatcherNamespace := system.Namespace() - if scope == scopeNamespace { - dispatcherNamespace = kc.Namespace - } - - // Get the Dispatcher Service Endpoints and propagate the status to the Channel - // endpoints has the same name as the service, so not a bug. - eps, err := t.endpointLister.Endpoints(dispatcherNamespace).Get(dispatcherName) - if err != nil { - return nil, fmt.Errorf("failed to get internal service: %w", err) - } - var readyIPs []string - - for _, sub := range eps.Subsets { - for _, address := range sub.Addresses { - readyIPs = append(readyIPs, address.IP) - } - } - - if len(readyIPs) == 0 { - return nil, fmt.Errorf("no gateway pods available") - } - - u, _ := url.Parse(fmt.Sprintf("http://%s.%s/%s/%s", dispatcherName, dispatcherNamespace, kc.Namespace, kc.Name)) - - uls := []*url.URL{u} - - return []status.ProbeTarget{ - { - PodIPs: sets.NewString(readyIPs...), - PodPort: "8081", Port: "8081", URLs: uls, - }, - }, nil -} +const ( + channelLabelKey = "messaging.knative.dev/channel" + channelLabelValue = "kafka-channel" + roleLabelKey = "messaging.knative.dev/role" + roleLabelValue = "dispatcher" +) // NewController initializes the controller and is called by the generated code. // Registers event handlers to enqueue events. @@ -107,6 +68,7 @@ func NewController( serviceAccountInformer := serviceaccount.Get(ctx) roleBindingInformer := rolebinding.Get(ctx) serviceInformer := service.Get(ctx) + podInformer := podinformer.Get(ctx) r := &Reconciler{ systemNamespace: system.Namespace(), @@ -185,12 +147,24 @@ func NewController( Handler: controller.HandleAll(grCh), }) - return impl -} + podInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ + FilterFunc: knativeReconciler.ChainFilterFuncs( + knativeReconciler.LabelFilterFunc(channelLabelKey, channelLabelValue, false), + knativeReconciler.LabelFilterFunc(roleLabelKey, roleLabelValue, false), + ), + Handler: cache.ResourceEventHandlerFuncs{ + // Cancel probing when a Pod is deleted + DeleteFunc: func(obj interface{}) { + pod, ok := obj.(*corev1.Pod) + if ok { + logger.Debugw("Dispatcher pod deleted. 
Canceling pod probing.", + zap.String("pod", pod.GetName())) + statusProber.CancelPodProbing(pod) + impl.GlobalResync(kafkaChannelInformer.Informer()) + } + }, + }, + }) -func NewProbeTargetLister(logger *zap.SugaredLogger, lister corev1listers.EndpointsLister) status.ProbeTargetLister { - tl := TargetLister{ - endpointLister: lister, - } - return &tl + return impl } diff --git a/pkg/channel/consolidated/reconciler/controller/kafkachannel.go b/pkg/channel/consolidated/reconciler/controller/kafkachannel.go index 6a14d4e9fa..59ad71b601 100644 --- a/pkg/channel/consolidated/reconciler/controller/kafkachannel.go +++ b/pkg/channel/consolidated/reconciler/controller/kafkachannel.go @@ -75,6 +75,11 @@ const ( dispatcherName = "kafka-ch-dispatcher" ) +var ( + scopeNamespace = "namespace" + scopeCluster = "cluster" +) + func newReconciledNormal(namespace, name string) pkgreconciler.Event { return pkgreconciler.NewEvent(corev1.EventTypeNormal, "KafkaChannelReconciled", "KafkaChannel reconciled: \"%s/%s\"", namespace, name) } @@ -127,11 +132,6 @@ type Reconciler struct { statusManager status.Manager } -var ( - scopeNamespace = "namespace" - scopeCluster = "cluster" -) - type envConfig struct { Image string `envconfig:"DISPATCHER_IMAGE" required:"true"` } diff --git a/pkg/channel/consolidated/reconciler/controller/lister.go b/pkg/channel/consolidated/reconciler/controller/lister.go new file mode 100644 index 0000000000..db39a2b251 --- /dev/null +++ b/pkg/channel/consolidated/reconciler/controller/lister.go @@ -0,0 +1,84 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "fmt" + "net/url" + + "knative.dev/eventing-kafka/pkg/channel/consolidated/status" + + "go.uber.org/zap" + "k8s.io/apimachinery/pkg/util/sets" + v1 "k8s.io/client-go/listers/core/v1" + "knative.dev/eventing-kafka/pkg/apis/messaging/v1beta1" + "knative.dev/eventing/pkg/apis/eventing" + "knative.dev/pkg/system" +) + +type DispatcherPodsLister struct { + logger *zap.SugaredLogger + endpointLister v1.EndpointsLister +} + +func (t *DispatcherPodsLister) ListProbeTargets(ctx context.Context, kc v1beta1.KafkaChannel) (*status.ProbeTarget, error) { + scope, ok := kc.Annotations[eventing.ScopeAnnotationKey] + if !ok { + scope = scopeCluster + } + + dispatcherNamespace := system.Namespace() + if scope == scopeNamespace { + dispatcherNamespace = kc.Namespace + } + + // Get the Dispatcher Service Endpoints and propagate the status to the Channel + // endpoints has the same name as the service, so not a bug. 
+	eps, err := t.endpointLister.Endpoints(dispatcherNamespace).Get(dispatcherName)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get dispatcher endpoints: %w", err)
+	}
+	var readyIPs []string
+
+	for _, sub := range eps.Subsets {
+		for _, address := range sub.Addresses {
+			readyIPs = append(readyIPs, address.IP)
+		}
+	}
+
+	if len(readyIPs) == 0 {
+		return nil, fmt.Errorf("no dispatcher pods available")
+	}
+
+	u, _ := url.Parse(fmt.Sprintf("http://%s.%s/%s/%s", dispatcherName, dispatcherNamespace, kc.Namespace, kc.Name))
+
+	return &status.ProbeTarget{
+		PodIPs:  sets.NewString(readyIPs...),
+		PodPort: "8081",
+		Port:    "8081",
+		URL:     u,
+	}, nil
+}
+
+func NewProbeTargetLister(logger *zap.SugaredLogger, lister v1.EndpointsLister) status.ProbeTargetLister {
+	tl := DispatcherPodsLister{
+		logger:         logger,
+		endpointLister: lister,
+	}
+	return &tl
+}
diff --git a/pkg/channel/consolidated/status/status.go b/pkg/channel/consolidated/status/status.go
index c67a84d953..18b3be9ab4 100644
--- a/pkg/channel/consolidated/status/status.go
+++ b/pkg/channel/consolidated/status/status.go
@@ -40,6 +40,7 @@ import (
 	messagingv1beta1 "knative.dev/eventing-kafka/pkg/apis/messaging/v1beta1"
 	eventingduckv1 "knative.dev/eventing/pkg/apis/duck/v1"
 	"knative.dev/networking/pkg/prober"
+	"knative.dev/pkg/kmeta"
 	"knative.dev/pkg/logging"
 )
@@ -63,6 +64,9 @@ type targetState struct {
 	// pendingCount is the number of pods that haven't been successfully probed yet
 	pendingCount atomic.Int32
+	// readyCount is the number of pods that have the subscription ready
+	readyCount atomic.Int32
+	initialCount int
 	lastAccessed time.Time
 	cancel       func()
@@ -97,13 +101,13 @@ type ProbeTarget struct {
 	PodIPs  sets.String
 	PodPort string
 	Port    string
-	URLs    []*url.URL
+	URL     *url.URL
 }
 
 // ProbeTargetLister lists all the targets that requires probing.
 type ProbeTargetLister interface {
 	// ListProbeTargets returns a list of targets to be probed
-	ListProbeTargets(ctx context.Context, ch messagingv1beta1.KafkaChannel) ([]ProbeTarget, error)
+	ListProbeTargets(ctx context.Context, ch messagingv1beta1.KafkaChannel) (*ProbeTarget, error)
 }
 
 // Manager provides a way to check if an Ingress is ready
@@ -164,6 +168,21 @@ func computeHash(sub eventingduckv1.SubscriberSpec) ([sha256.Size]byte, error) {
 	return sha256.Sum256(bytes), nil
 }
 
+func (m *Prober) checkReadiness(state *targetState) bool {
+	consumers := int32(state.initialCount)
+	partitions := state.ch.Spec.NumPartitions
+	m.logger.Debugw("Checking subscription readiness",
+		zap.Any("initial probed consumers", consumers),
+		zap.Any("channel partitions", partitions),
+		zap.Any("ready consumers", state.readyCount.Load()),
+	)
+	if consumers > partitions {
+		return state.readyCount.Load() == partitions
+	} else {
+		return state.readyCount.Load() == consumers
+	}
+}
+
 func (m *Prober) IsReady(ctx context.Context, ch messagingv1beta1.KafkaChannel, sub eventingduckv1.SubscriberSpec) (bool, error) {
 	subscriptionKey := sub.UID
 	logger := logging.FromContext(ctx)
@@ -180,7 +199,9 @@ func (m *Prober) IsReady(ctx context.Context, ch messagingv1beta1.KafkaChannel,
 		if state, ok := m.targetStates[subscriptionKey]; ok {
 			if state.hash == hash {
 				state.lastAccessed = time.Now()
-				return state.pendingCount.Load() == 0, true
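+				// A multi-partition channel can be consumed by several dispatcher pods,
+				// so readiness is no longer "pendingCount == 0": checkReadiness requires
+				// one ready consumer per partition, or one per probed pod when there are
+				// fewer pods than partitions.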
Checking readiness", + zap.Any("subscription", sub.UID)) + return m.checkReadiness(state), true } // Cancel the polling for the outdated version @@ -202,26 +223,27 @@ func (m *Prober) IsReady(ctx context.Context, ch messagingv1beta1.KafkaChannel, } // Get the probe targets and group them by IP - targets, err := m.targetLister.ListProbeTargets(ctx, ch) + target, err := m.targetLister.ListProbeTargets(ctx, ch) if err != nil { + logger.Errorw("Error listing probe targets", zap.Error(err), + zap.Any("subscription", sub.UID)) return false, err } + workItems := make(map[string][]*workItem) - for _, target := range targets { - for ip := range target.PodIPs { - for _, url := range target.URLs { - workItems[ip] = append(workItems[ip], &workItem{ - targetStates: subscriptionState, - url: url, - podIP: ip, - podPort: target.PodPort, - logger: logger, - }) - } - } + for ip := range target.PodIPs { + workItems[ip] = append(workItems[ip], &workItem{ + targetStates: subscriptionState, + url: target.URL, + podIP: ip, + podPort: target.PodPort, + logger: logger, + }) } + subscriptionState.initialCount = target.PodIPs.Len() subscriptionState.pendingCount.Store(int32(len(workItems))) + subscriptionState.readyCount.Store(0) for ip, ipWorkItems := range workItems { // Get or create the context for that IP @@ -279,7 +301,7 @@ func (m *Prober) IsReady(ctx context.Context, ch messagingv1beta1.KafkaChannel, defer m.mu.Unlock() m.targetStates[subscriptionKey] = subscriptionState }() - return len(workItems) == 0, nil + return false, nil } // Start starts the Manager background operations @@ -313,7 +335,12 @@ func (m *Prober) Start(done <-chan struct{}) chan struct{} { // CancelProbing cancels probing of the provided Subscription func (m *Prober) CancelProbing(sub eventingduckv1.SubscriberSpec) { - key := sub.UID + acc, err := kmeta.DeletionHandlingAccessor(sub) + if err != nil { + return + } + + key := acc.GetUID() m.mu.Lock() defer m.mu.Unlock() if state, ok := m.targetStates[key]; ok { @@ -409,9 +436,9 @@ func (m *Prober) onProbingSuccess(subscriptionState *targetState, podState *podS if podState.pendingCount.Dec() == 0 { // Unlock the goroutine blocked on <-podCtx.Done() podState.cancel() - // This is the last pod being successfully probed, the subscription is ready - if subscriptionState.pendingCount.Dec() == 0 { + if m.checkReadiness(subscriptionState) { + subscriptionState.cancel() m.readyCallback(subscriptionState.ch, subscriptionState.sub) } } @@ -429,6 +456,7 @@ func (m *Prober) onProbingCancellation(subscriptionState *targetState, podState if podState.pendingCount.CAS(pendingCount, 0) { // This is the last pod being successfully probed, the subscription is ready if subscriptionState.pendingCount.Dec() == 0 { + subscriptionState.cancel() m.readyCallback(subscriptionState.ch, subscriptionState.sub) } return @@ -438,32 +466,36 @@ func (m *Prober) onProbingCancellation(subscriptionState *targetState, podState func (m *Prober) probeVerifier(item *workItem) prober.Verifier { return func(r *http.Response, b []byte) (bool, error) { - //TODO Check if we need to use a hash + m.logger.Debugw("Verifying response", zap.Int("status code", r.StatusCode), + zap.ByteString("body", b)) switch r.StatusCode { case http.StatusOK: /** {"my-kafka-channel":["90713ffd-f527-42bf-b158-57630b68ebe2","a2041ec2-3295-4cd8-ac31-e699ab08273e","d3d70a79-8528-4df6-a812-3b559380cf08","db536b74-45f8-41cd-ab3e-7e3f60ed9e35","eb3aeee9-7cb5-4cad-b4c4-424e436dac9f"]} */ - m.logger.Debug("Verifying response") var subscriptions = 
 			var subscriptions = make(map[string][]string)
 			err := json.Unmarshal(b, &subscriptions)
 			if err != nil {
-				m.logger.Errorw("Error unmarshaling", err)
+				m.logger.Errorw("Error unmarshaling probe response", zap.Error(err))
 				return false, err
 			}
-			m.logger.Debugw("Got response", zap.Any("Response", b))
-			m.logger.Debugw("Got list", zap.Any("Unmarshaled", subscriptions))
 			uid := string(item.targetStates.sub.UID)
-			m.logger.Debugf("want %s", uid)
 			key := fmt.Sprintf("%s/%s", item.targetStates.ch.Namespace, item.targetStates.ch.Name)
+			m.logger.Debugw("Received probing response from target",
+				zap.Any("found subscriptions", subscriptions),
+				zap.String("pod ip", item.podIP),
+				zap.String("want channel", key),
+				zap.String("want subscription", uid),
+			)
 			if subs, ok := subscriptions[key]; ok && sets.NewString(subs...).Has(uid) {
-
+				item.targetStates.readyCount.Inc()
 				return true, nil
 			} else {
 				//TODO return an error if the channel doesn't exist?
 				return false, nil
 			}
 		case http.StatusNotFound, http.StatusServiceUnavailable:
+			m.logger.Errorf("unexpected status code: want %v, got %v", http.StatusOK, r.StatusCode)
 			return false, fmt.Errorf("unexpected status code: want %v, got %v", http.StatusOK, r.StatusCode)
 		default:
 			item.logger.Errorf("Probing of %s abandoned, IP: %s:%s: the response status is %v, expected one of: %v",
diff --git a/pkg/channel/consolidated/status/status_test.go b/pkg/channel/consolidated/status/status_test.go
index cd72f7263a..08f213ee9b 100644
--- a/pkg/channel/consolidated/status/status_test.go
+++ b/pkg/channel/consolidated/status/status_test.go
@@ -24,24 +24,20 @@ import (
 	"net/http"
 	"net/http/httptest"
 	"net/url"
-	"path"
 	"strconv"
 	"testing"
 	"time"
 
 	"go.uber.org/atomic"
-
-	"knative.dev/pkg/apis"
-
-	"k8s.io/apimachinery/pkg/types"
-
 	"go.uber.org/zap/zaptest"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/sets"
 
 	"knative.dev/eventing-kafka/pkg/apis/messaging/v1beta1"
 	messagingv1beta1 "knative.dev/eventing-kafka/pkg/apis/messaging/v1beta1"
 	eventingduckv1 "knative.dev/eventing/pkg/apis/duck/v1"
+	"knative.dev/pkg/apis"
 )
@@ -142,11 +138,11 @@ func TestProbeSinglePod(t *testing.T) {
 
 	prober := NewProber(
 		zaptest.NewLogger(t).Sugar(),
-		fakeProbeTargetLister{{
+		fakeProbeTargetLister{
 			PodIPs:  sets.NewString(hostname),
 			PodPort: strconv.Itoa(port),
-			URLs:    []*url.URL{tsURL},
-		}},
+			URL:     tsURL,
+		},
 		func(c v1beta1.KafkaChannel, s eventingduckv1.SubscriberSpec) {
 			ready <- &ReadyPair{
 				c,
 				s,
 			}
@@ -196,25 +192,11 @@ }
 }
 
-type fakeProbeTargetLister []ProbeTarget
-
-func (l fakeProbeTargetLister) ListProbeTargets(ctx context.Context, kc messagingv1beta1.KafkaChannel) ([]ProbeTarget, error) {
-	targets := []ProbeTarget{}
-	for _, target := range l {
-		newTarget := ProbeTarget{
-			PodIPs:  target.PodIPs,
-			PodPort: target.PodPort,
-			Port:    target.Port,
-		}
-
-		for _, u := range target.URLs {
-			newURL := *u
-			newURL.Path = path.Join(newURL.Path, kc.Namespace, kc.Name)
-			newTarget.URLs = append(newTarget.URLs, &newURL)
-		}
-		targets = append(targets, newTarget)
-	}
-	return targets, nil
+type fakeProbeTargetLister ProbeTarget
+
+func (l fakeProbeTargetLister) ListProbeTargets(ctx context.Context, kc messagingv1beta1.KafkaChannel) (*ProbeTarget, error) {
+	t := ProbeTarget(l)
+	return &t, nil
 }
 
 type notFoundLister struct{}
 
 func (l notFoundLister) ListProbeTargets(ctx context.Context, obj interface{}) ([]ProbeTarget, error) {
 	return nil, errors.New("not found")
 }
diff --git a/pkg/common/consumer/consumer_handler.go b/pkg/common/consumer/consumer_handler.go
index bd005701fb..18492ec93d 100644
--- a/pkg/common/consumer/consumer_handler.go
+++ b/pkg/common/consumer/consumer_handler.go
@@ -29,6 +29,7 @@ type KafkaConsumerHandler interface {
 	// The returned error is enqueued in errors channel.
 	Handle(context context.Context, message *sarama.ConsumerMessage) (bool, error)
 	SetReady(ready bool)
+	GetConsumerGroup() string
 }
 
 // ConsumerHandler implements sarama.ConsumerGroupHandler and provides some glue code to simplify message handling
@@ -66,7 +67,7 @@ func (consumer *SaramaConsumerHandler) Cleanup(session sarama.ConsumerGroupSessi
 
 // ConsumeClaim must start a consumer loop of ConsumerGroupClaim's Messages().
 func (consumer *SaramaConsumerHandler) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
-	consumer.logger.Info(fmt.Sprintf("Starting partition consumer, topic: %s, partition: %d, initialOffset: %d", claim.Topic(), claim.Partition(), claim.InitialOffset()))
+	consumer.logger.Infow(fmt.Sprintf("Starting partition consumer, topic: %s, partition: %d, initialOffset: %d", claim.Topic(), claim.Partition(), claim.InitialOffset()), zap.String("ConsumerGroup", consumer.handler.GetConsumerGroup()))
 	consumer.handler.SetReady(true)
 	// NOTE:
 	// Do not move the code below to a goroutine.
diff --git a/pkg/source/adapter/adapter.go b/pkg/source/adapter/adapter.go
index 0a3c7b1985..6c4c4b0ac5 100644
--- a/pkg/source/adapter/adapter.go
+++ b/pkg/source/adapter/adapter.go
@@ -82,6 +82,9 @@ func NewAdapter(ctx context.Context, processed adapter.EnvConfigAccessor, httpMe
 		keyTypeMapper: getKeyTypeMapper(config.KeyType),
 	}
 }
+func (a *Adapter) GetConsumerGroup() string {
+	return a.config.ConsumerGroup
+}
 
 func (a *Adapter) Start(ctx context.Context) error {
 	a.logger.Infow("Starting with config: ",
diff --git a/vendor/github.com/google/mako/go/quickstore/BUILD b/vendor/github.com/google/mako/go/quickstore/BUILD
new file mode 100644
index 0000000000..4623b68ead
--- /dev/null
+++ b/vendor/github.com/google/mako/go/quickstore/BUILD
@@ -0,0 +1,24 @@
+# Quickstore library for Go.
+# See quickstore.go and go/mako-quickstore for documentation.
+# See quickstore_example_test.go for example usage.
+package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +go_library( + name = "quickstore", + srcs = ["quickstore.go"], + importpath = "github.com/google/mako/go/quickstore", + deps = [ + "//internal/go/common", + "//internal/quickstore_microservice/proto:quickstore_go_grpc_pb", + "//internal/quickstore_microservice/proto:quickstore_go_proto", + "//proto/quickstore:quickstore_go_proto", + "//spec/proto:mako_go_proto", + "@com_github_golang_glog//:go_default_library", + "@com_github_golang_protobuf//proto:go_default_library", + "@org_golang_google_grpc//:go_default_library", + ], +) diff --git a/vendor/github.com/google/mako/internal/go/common/BUILD b/vendor/github.com/google/mako/internal/go/common/BUILD new file mode 100644 index 0000000000..1f90d9eda1 --- /dev/null +++ b/vendor/github.com/google/mako/internal/go/common/BUILD @@ -0,0 +1,14 @@ +# Go libraries for mako +package(default_visibility = ["//:internal"]) + +licenses(["notice"]) + +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "common", + srcs = [ + "common_deps.go", + ], + importpath = "github.com/google/mako/internal/go/common", +) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel new file mode 100644 index 0000000000..5242751fb2 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel @@ -0,0 +1,23 @@ +load("@rules_proto//proto:defs.bzl", "proto_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") + +package(default_visibility = ["//visibility:public"]) + +proto_library( + name = "internal_proto", + srcs = ["errors.proto"], + deps = ["@com_google_protobuf//:any_proto"], +) + +go_proto_library( + name = "internal_go_proto", + importpath = "github.com/grpc-ecosystem/grpc-gateway/internal", + proto = ":internal_proto", +) + +go_library( + name = "go_default_library", + embed = [":internal_go_proto"], + importpath = "github.com/grpc-ecosystem/grpc-gateway/internal", +) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel new file mode 100644 index 0000000000..58b72b9cf7 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel @@ -0,0 +1,85 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package(default_visibility = ["//visibility:public"]) + +go_library( + name = "go_default_library", + srcs = [ + "context.go", + "convert.go", + "doc.go", + "errors.go", + "fieldmask.go", + "handler.go", + "marshal_httpbodyproto.go", + "marshal_json.go", + "marshal_jsonpb.go", + "marshal_proto.go", + "marshaler.go", + "marshaler_registry.go", + "mux.go", + "pattern.go", + "proto2_convert.go", + "proto_errors.go", + "query.go", + ], + importpath = "github.com/grpc-ecosystem/grpc-gateway/runtime", + deps = [ + "//internal:go_default_library", + "//utilities:go_default_library", + "@com_github_golang_protobuf//descriptor:go_default_library_gen", + "@com_github_golang_protobuf//jsonpb:go_default_library_gen", + "@com_github_golang_protobuf//proto:go_default_library", + "@go_googleapis//google/api:httpbody_go_proto", + "@io_bazel_rules_go//proto/wkt:any_go_proto", + "@io_bazel_rules_go//proto/wkt:descriptor_go_proto", + "@io_bazel_rules_go//proto/wkt:duration_go_proto", + 
"@io_bazel_rules_go//proto/wkt:field_mask_go_proto", + "@io_bazel_rules_go//proto/wkt:timestamp_go_proto", + "@io_bazel_rules_go//proto/wkt:wrappers_go_proto", + "@org_golang_google_grpc//codes:go_default_library", + "@org_golang_google_grpc//grpclog:go_default_library", + "@org_golang_google_grpc//metadata:go_default_library", + "@org_golang_google_grpc//status:go_default_library", + ], +) + +go_test( + name = "go_default_test", + size = "small", + srcs = [ + "context_test.go", + "convert_test.go", + "errors_test.go", + "fieldmask_test.go", + "handler_test.go", + "marshal_httpbodyproto_test.go", + "marshal_json_test.go", + "marshal_jsonpb_test.go", + "marshal_proto_test.go", + "marshaler_registry_test.go", + "mux_test.go", + "pattern_test.go", + "query_test.go", + ], + embed = [":go_default_library"], + deps = [ + "//internal:go_default_library", + "//runtime/internal/examplepb:go_default_library", + "//utilities:go_default_library", + "@com_github_golang_protobuf//jsonpb:go_default_library_gen", + "@com_github_golang_protobuf//proto:go_default_library", + "@com_github_golang_protobuf//ptypes:go_default_library_gen", + "@go_googleapis//google/api:httpbody_go_proto", + "@go_googleapis//google/rpc:errdetails_go_proto", + "@io_bazel_rules_go//proto/wkt:duration_go_proto", + "@io_bazel_rules_go//proto/wkt:empty_go_proto", + "@io_bazel_rules_go//proto/wkt:field_mask_go_proto", + "@io_bazel_rules_go//proto/wkt:struct_go_proto", + "@io_bazel_rules_go//proto/wkt:timestamp_go_proto", + "@io_bazel_rules_go//proto/wkt:wrappers_go_proto", + "@org_golang_google_grpc//codes:go_default_library", + "@org_golang_google_grpc//metadata:go_default_library", + "@org_golang_google_grpc//status:go_default_library", + ], +) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel new file mode 100644 index 0000000000..7109d79323 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel @@ -0,0 +1,21 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package(default_visibility = ["//visibility:public"]) + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "pattern.go", + "readerfactory.go", + "trie.go", + ], + importpath = "github.com/grpc-ecosystem/grpc-gateway/utilities", +) + +go_test( + name = "go_default_test", + size = "small", + srcs = ["trie_test.go"], + embed = [":go_default_library"], +) diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS new file mode 100644 index 0000000000..d18a17885b --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS @@ -0,0 +1,23 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: +- thockin +- lavalamp +- smarterclayton +- wojtek-t +- deads2k +- brendandburns +- derekwaynecarr +- caesarxuchao +- mikedanese +- liggitt +- nikhiljindal +- gmarek +- erictune +- saad-ali +- janetkuo +- tallclair +- dims +- hongchaodeng +- krousey +- cjcullen diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS new file mode 100644 index 0000000000..68b8d353ca --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS @@ -0,0 +1,21 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: +- thockin +- smarterclayton +- wojtek-t +- deads2k +- brendandburns +- derekwaynecarr +- caesarxuchao +- mikedanese +- liggitt +- nikhiljindal +- gmarek +- janetkuo +- ncdc +- dims +- krousey 
+- resouer +- mfojtik +- jianhuiz diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS new file mode 100644 index 0000000000..7ac0fe11a1 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS @@ -0,0 +1,13 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: +- thockin +- lavalamp +- smarterclayton +- wojtek-t +- derekwaynecarr +- mikedanese +- saad-ali +- janetkuo +- xiang90 +- mbohlool diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS new file mode 100644 index 0000000000..15b4c875a3 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS @@ -0,0 +1,31 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: +- thockin +- smarterclayton +- wojtek-t +- deads2k +- brendandburns +- caesarxuchao +- liggitt +- nikhiljindal +- gmarek +- erictune +- davidopp +- sttts +- quinton-hoole +- luxas +- janetkuo +- justinsb +- ncdc +- soltysh +- dims +- madhusudancs +- hongchaodeng +- krousey +- mml +- mbohlool +- therc +- mqliang +- kevin-wangzefeng +- jianhuiz diff --git a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/OWNERS b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/OWNERS new file mode 100644 index 0000000000..3f72c69ba3 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/OWNERS @@ -0,0 +1,7 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- pwittrock +reviewers: +- mengqiy +- apelisse diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS new file mode 100644 index 0000000000..cfee199fa0 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS @@ -0,0 +1,8 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- pwittrock +- mengqiy +reviewers: +- mengqiy +- apelisse diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/json/OWNERS b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/OWNERS new file mode 100644 index 0000000000..3f72c69ba3 --- /dev/null +++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/OWNERS @@ -0,0 +1,7 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- pwittrock +reviewers: +- mengqiy +- apelisse diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS new file mode 100644 index 0000000000..e0ec62deb2 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS @@ -0,0 +1,9 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +# approval on api packages bubbles to api-approvers +reviewers: +- sig-auth-authenticators-approvers +- sig-auth-authenticators-reviewers +labels: +- sig/auth + diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/OWNERS b/vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/OWNERS new file mode 100644 index 0000000000..97fcd3dd30 --- /dev/null +++ b/vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/OWNERS @@ -0,0 +1,8 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- cjcullen +- jlowdermilk +reviewers: +- cjcullen +- jlowdermilk diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/oidc/OWNERS b/vendor/k8s.io/client-go/plugin/pkg/client/auth/oidc/OWNERS new file mode 100644 index 0000000000..03fb44ea24 --- /dev/null +++ b/vendor/k8s.io/client-go/plugin/pkg/client/auth/oidc/OWNERS @@ -0,0 +1,7 @@ +# See the OWNERS docs at 
https://go.k8s.io/owners + +approvers: +- ericchiang +reviewers: +- ericchiang +- rithujohn191 diff --git a/vendor/k8s.io/client-go/rest/OWNERS b/vendor/k8s.io/client-go/rest/OWNERS new file mode 100644 index 0000000000..c02ec6a250 --- /dev/null +++ b/vendor/k8s.io/client-go/rest/OWNERS @@ -0,0 +1,25 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: +- thockin +- smarterclayton +- caesarxuchao +- wojtek-t +- deads2k +- brendandburns +- liggitt +- nikhiljindal +- gmarek +- erictune +- sttts +- luxas +- dims +- errordeveloper +- hongchaodeng +- krousey +- resouer +- cjcullen +- rmmh +- asalkeld +- juanvallejo +- lojies diff --git a/vendor/k8s.io/client-go/tools/auth/OWNERS b/vendor/k8s.io/client-go/tools/auth/OWNERS new file mode 100644 index 0000000000..3e05d309be --- /dev/null +++ b/vendor/k8s.io/client-go/tools/auth/OWNERS @@ -0,0 +1,9 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- sig-auth-authenticators-approvers +reviewers: +- sig-auth-authenticators-reviewers +labels: +- sig/auth + diff --git a/vendor/k8s.io/client-go/tools/cache/OWNERS b/vendor/k8s.io/client-go/tools/cache/OWNERS new file mode 100644 index 0000000000..7bbe635426 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/OWNERS @@ -0,0 +1,43 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- thockin +- lavalamp +- smarterclayton +- wojtek-t +- deads2k +- caesarxuchao +- liggitt +- ncdc +reviewers: +- thockin +- lavalamp +- smarterclayton +- wojtek-t +- deads2k +- brendandburns +- derekwaynecarr +- caesarxuchao +- mikedanese +- liggitt +- nikhiljindal +- erictune +- davidopp +- pmorie +- janetkuo +- justinsb +- soltysh +- jsafrane +- dims +- madhusudancs +- hongchaodeng +- krousey +- xiang90 +- mml +- ingvagabund +- resouer +- jessfraz +- mfojtik +- mqliang +- sdminonne +- ncdc diff --git a/vendor/k8s.io/client-go/tools/leaderelection/OWNERS b/vendor/k8s.io/client-go/tools/leaderelection/OWNERS new file mode 100644 index 0000000000..9ece5e1ea4 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/leaderelection/OWNERS @@ -0,0 +1,13 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- mikedanese +- timothysc +reviewers: +- wojtek-t +- deads2k +- mikedanese +- gmarek +- timothysc +- ingvagabund +- resouer diff --git a/vendor/k8s.io/client-go/tools/metrics/OWNERS b/vendor/k8s.io/client-go/tools/metrics/OWNERS new file mode 100644 index 0000000000..77bcb5090c --- /dev/null +++ b/vendor/k8s.io/client-go/tools/metrics/OWNERS @@ -0,0 +1,6 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: +- wojtek-t +- krousey +- jayunit100 diff --git a/vendor/k8s.io/client-go/tools/record/OWNERS b/vendor/k8s.io/client-go/tools/record/OWNERS new file mode 100644 index 0000000000..792f356b0d --- /dev/null +++ b/vendor/k8s.io/client-go/tools/record/OWNERS @@ -0,0 +1,28 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: +- lavalamp +- smarterclayton +- wojtek-t +- deads2k +- derekwaynecarr +- caesarxuchao +- vishh +- mikedanese +- liggitt +- nikhiljindal +- erictune +- pmorie +- dchen1107 +- saad-ali +- luxas +- yifan-gu +- mwielgus +- timothysc +- jsafrane +- dims +- krousey +- a-robinson +- aveshagarwal +- resouer +- cjcullen diff --git a/vendor/k8s.io/client-go/transport/OWNERS b/vendor/k8s.io/client-go/transport/OWNERS new file mode 100644 index 0000000000..a52176903c --- /dev/null +++ b/vendor/k8s.io/client-go/transport/OWNERS @@ -0,0 +1,9 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: +- smarterclayton +- 
wojtek-t +- deads2k +- liggitt +- krousey +- caesarxuchao diff --git a/vendor/k8s.io/client-go/util/cert/OWNERS b/vendor/k8s.io/client-go/util/cert/OWNERS new file mode 100644 index 0000000000..3cf0364383 --- /dev/null +++ b/vendor/k8s.io/client-go/util/cert/OWNERS @@ -0,0 +1,9 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- sig-auth-certificates-approvers +reviewers: +- sig-auth-certificates-reviewers +labels: +- sig/auth + diff --git a/vendor/k8s.io/client-go/util/keyutil/OWNERS b/vendor/k8s.io/client-go/util/keyutil/OWNERS new file mode 100644 index 0000000000..470b7a1c92 --- /dev/null +++ b/vendor/k8s.io/client-go/util/keyutil/OWNERS @@ -0,0 +1,7 @@ +approvers: +- sig-auth-certificates-approvers +reviewers: +- sig-auth-certificates-reviewers +labels: +- sig/auth + diff --git a/vendor/k8s.io/client-go/util/retry/OWNERS b/vendor/k8s.io/client-go/util/retry/OWNERS new file mode 100644 index 0000000000..dec3e88d63 --- /dev/null +++ b/vendor/k8s.io/client-go/util/retry/OWNERS @@ -0,0 +1,4 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: +- caesarxuchao diff --git a/vendor/k8s.io/code-generator/OWNERS b/vendor/k8s.io/code-generator/OWNERS new file mode 100644 index 0000000000..6f7abe3edb --- /dev/null +++ b/vendor/k8s.io/code-generator/OWNERS @@ -0,0 +1,13 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- lavalamp +- wojtek-t +- sttts +reviewers: +- lavalamp +- wojtek-t +- sttts +labels: +- sig/api-machinery +- area/code-generation diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/OWNERS b/vendor/k8s.io/code-generator/cmd/client-gen/OWNERS new file mode 100644 index 0000000000..62866d0b19 --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/client-gen/OWNERS @@ -0,0 +1,10 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- lavalamp +- wojtek-t +- caesarxuchao +reviewers: +- lavalamp +- wojtek-t +- caesarxuchao diff --git a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/OWNERS b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/OWNERS new file mode 100644 index 0000000000..613659162a --- /dev/null +++ b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/OWNERS @@ -0,0 +1,6 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- smarterclayton +reviewers: +- smarterclayton diff --git a/vendor/k8s.io/klog/OWNERS b/vendor/k8s.io/klog/OWNERS new file mode 100644 index 0000000000..380e514f28 --- /dev/null +++ b/vendor/k8s.io/klog/OWNERS @@ -0,0 +1,19 @@ +# See the OWNERS docs at https://go.k8s.io/owners +reviewers: + - jayunit100 + - hoegaarden + - andyxning + - neolit123 + - pohly + - yagonobre + - vincepri + - detiber +approvers: + - dims + - thockin + - justinsb + - tallclair + - piosz + - brancz + - DirectXMan12 + - lavalamp diff --git a/vendor/k8s.io/klog/v2/OWNERS b/vendor/k8s.io/klog/v2/OWNERS new file mode 100644 index 0000000000..380e514f28 --- /dev/null +++ b/vendor/k8s.io/klog/v2/OWNERS @@ -0,0 +1,19 @@ +# See the OWNERS docs at https://go.k8s.io/owners +reviewers: + - jayunit100 + - hoegaarden + - andyxning + - neolit123 + - pohly + - yagonobre + - vincepri + - detiber +approvers: + - dims + - thockin + - justinsb + - tallclair + - piosz + - brancz + - DirectXMan12 + - lavalamp diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/rules/OWNERS b/vendor/k8s.io/kube-openapi/pkg/generators/rules/OWNERS new file mode 100644 index 0000000000..235bc545b8 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/generators/rules/OWNERS @@ -0,0 +1,4 @@ +reviewers: +- roycaihw +approvers: +- roycaihw 
diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/OWNERS b/vendor/k8s.io/kube-openapi/pkg/util/proto/OWNERS new file mode 100644 index 0000000000..9621a6a3a4 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/OWNERS @@ -0,0 +1,2 @@ +approvers: +- apelisse diff --git a/vendor/k8s.io/utils/pointer/OWNERS b/vendor/k8s.io/utils/pointer/OWNERS new file mode 100644 index 0000000000..0d6392752a --- /dev/null +++ b/vendor/k8s.io/utils/pointer/OWNERS @@ -0,0 +1,10 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- apelisse +- stewart-yu +- thockin +reviewers: +- apelisse +- stewart-yu +- thockin diff --git a/vendor/knative.dev/eventing/pkg/channel/OWNERS b/vendor/knative.dev/eventing/pkg/channel/OWNERS new file mode 100644 index 0000000000..05ce860f94 --- /dev/null +++ b/vendor/knative.dev/eventing/pkg/channel/OWNERS @@ -0,0 +1,7 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +reviewers: +- channel-reviewers + +labels: +- area/pkg-channel diff --git a/vendor/knative.dev/eventing/test/OWNERS b/vendor/knative.dev/eventing/test/OWNERS new file mode 100644 index 0000000000..c50adc8493 --- /dev/null +++ b/vendor/knative.dev/eventing/test/OWNERS @@ -0,0 +1,10 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- productivity-approvers + +reviewers: +- productivity-reviewers + +labels: +- area/test-and-release diff --git a/vendor/knative.dev/eventing/test/performance/infra/OWNERS b/vendor/knative.dev/eventing/test/performance/infra/OWNERS new file mode 100644 index 0000000000..0cfccadd87 --- /dev/null +++ b/vendor/knative.dev/eventing/test/performance/infra/OWNERS @@ -0,0 +1,10 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- performance-approvers + +reviewers: +- performance-reviewers + +labels: +- area/performance diff --git a/vendor/knative.dev/eventing/test/test_images/performance/OWNERS b/vendor/knative.dev/eventing/test/test_images/performance/OWNERS new file mode 100644 index 0000000000..0cfccadd87 --- /dev/null +++ b/vendor/knative.dev/eventing/test/test_images/performance/OWNERS @@ -0,0 +1,10 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- performance-approvers + +reviewers: +- performance-reviewers + +labels: +- area/performance diff --git a/vendor/knative.dev/hack/OWNERS b/vendor/knative.dev/hack/OWNERS new file mode 100644 index 0000000000..d76f0af39e --- /dev/null +++ b/vendor/knative.dev/hack/OWNERS @@ -0,0 +1,7 @@ +approvers: + - technical-oversight-committee + - productivity-approvers + +reviewers: + - productivity-approvers + - productivity-reviewers diff --git a/vendor/knative.dev/hack/OWNERS_ALIASES b/vendor/knative.dev/hack/OWNERS_ALIASES new file mode 100644 index 0000000000..84bbd0d3b7 --- /dev/null +++ b/vendor/knative.dev/hack/OWNERS_ALIASES @@ -0,0 +1,16 @@ +aliases: + technical-oversight-committee: + - evankanderson + - grantr + - markusthoemmes + - mattmoor + - tcnghia + productivity-approvers: + - chaodaiG + - chizhg + - coryrc + - n3wscott + productivity-reviewers: + - steuhs + - peterfeifanchen + - efiturri diff --git a/vendor/knative.dev/pkg/apis/OWNERS b/vendor/knative.dev/pkg/apis/OWNERS new file mode 100644 index 0000000000..1227b2b9e8 --- /dev/null +++ b/vendor/knative.dev/pkg/apis/OWNERS @@ -0,0 +1,7 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. 
+ +approvers: +- apis-approvers + +reviewers: +- apis-reviewers diff --git a/vendor/knative.dev/pkg/apis/duck/OWNERS b/vendor/knative.dev/pkg/apis/duck/OWNERS new file mode 100644 index 0000000000..8df611ba00 --- /dev/null +++ b/vendor/knative.dev/pkg/apis/duck/OWNERS @@ -0,0 +1,7 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- apis-duck-approvers + +reviewers: +- apis-duck-reviewers diff --git a/vendor/knative.dev/pkg/client/injection/kube/informers/core/v1/pod/pod.go b/vendor/knative.dev/pkg/client/injection/kube/informers/core/v1/pod/pod.go new file mode 100644 index 0000000000..8f8a6fffbc --- /dev/null +++ b/vendor/knative.dev/pkg/client/injection/kube/informers/core/v1/pod/pod.go @@ -0,0 +1,52 @@ +/* +Copyright 2020 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. + +package pod + +import ( + context "context" + + v1 "k8s.io/client-go/informers/core/v1" + factory "knative.dev/pkg/client/injection/kube/informers/factory" + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" +) + +func init() { + injection.Default.RegisterInformer(withInformer) +} + +// Key is used for associating the Informer inside the context.Context. +type Key struct{} + +func withInformer(ctx context.Context) (context.Context, controller.Informer) { + f := factory.Get(ctx) + inf := f.Core().V1().Pods() + return context.WithValue(ctx, Key{}, inf), inf.Informer() +} + +// Get extracts the typed informer from the context. +func Get(ctx context.Context) v1.PodInformer { + untyped := ctx.Value(Key{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch k8s.io/client-go/informers/core/v1.PodInformer from context.") + } + return untyped.(v1.PodInformer) +} diff --git a/vendor/knative.dev/pkg/configmap/OWNERS b/vendor/knative.dev/pkg/configmap/OWNERS new file mode 100644 index 0000000000..2480fc6d43 --- /dev/null +++ b/vendor/knative.dev/pkg/configmap/OWNERS @@ -0,0 +1,4 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- configmap-approvers diff --git a/vendor/knative.dev/pkg/controller/OWNERS b/vendor/knative.dev/pkg/controller/OWNERS new file mode 100644 index 0000000000..0b270d53af --- /dev/null +++ b/vendor/knative.dev/pkg/controller/OWNERS @@ -0,0 +1,7 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- controller-approvers + +reviewers: +- controller-reviewers diff --git a/vendor/knative.dev/pkg/hack/OWNERS b/vendor/knative.dev/pkg/hack/OWNERS new file mode 100644 index 0000000000..c50adc8493 --- /dev/null +++ b/vendor/knative.dev/pkg/hack/OWNERS @@ -0,0 +1,10 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. 
+ +approvers: +- productivity-approvers + +reviewers: +- productivity-reviewers + +labels: +- area/test-and-release diff --git a/vendor/knative.dev/pkg/hash/OWNERS b/vendor/knative.dev/pkg/hash/OWNERS new file mode 100644 index 0000000000..0b270d53af --- /dev/null +++ b/vendor/knative.dev/pkg/hash/OWNERS @@ -0,0 +1,7 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- controller-approvers + +reviewers: +- controller-reviewers diff --git a/vendor/knative.dev/pkg/injection/OWNERS b/vendor/knative.dev/pkg/injection/OWNERS new file mode 100644 index 0000000000..dda47512a4 --- /dev/null +++ b/vendor/knative.dev/pkg/injection/OWNERS @@ -0,0 +1,5 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- mattmoor +- n3wscott diff --git a/vendor/knative.dev/pkg/kmeta/OWNERS b/vendor/knative.dev/pkg/kmeta/OWNERS new file mode 100644 index 0000000000..29b0d9f256 --- /dev/null +++ b/vendor/knative.dev/pkg/kmeta/OWNERS @@ -0,0 +1,4 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- kmeta-approvers diff --git a/vendor/knative.dev/pkg/logging/OWNERS b/vendor/knative.dev/pkg/logging/OWNERS new file mode 100644 index 0000000000..fa4854ba0a --- /dev/null +++ b/vendor/knative.dev/pkg/logging/OWNERS @@ -0,0 +1,4 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- logging-approvers diff --git a/vendor/knative.dev/pkg/metrics/OWNERS b/vendor/knative.dev/pkg/metrics/OWNERS new file mode 100644 index 0000000000..6d3966df44 --- /dev/null +++ b/vendor/knative.dev/pkg/metrics/OWNERS @@ -0,0 +1,4 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- metrics-approvers diff --git a/vendor/knative.dev/pkg/network/OWNERS b/vendor/knative.dev/pkg/network/OWNERS new file mode 100644 index 0000000000..b14ac5308e --- /dev/null +++ b/vendor/knative.dev/pkg/network/OWNERS @@ -0,0 +1,5 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- tcnghia +- vagababov diff --git a/vendor/knative.dev/pkg/reconciler/OWNERS b/vendor/knative.dev/pkg/reconciler/OWNERS new file mode 100644 index 0000000000..0b270d53af --- /dev/null +++ b/vendor/knative.dev/pkg/reconciler/OWNERS @@ -0,0 +1,7 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- controller-approvers + +reviewers: +- controller-reviewers diff --git a/vendor/knative.dev/pkg/resolver/OWNERS b/vendor/knative.dev/pkg/resolver/OWNERS new file mode 100644 index 0000000000..acf2ee2c1c --- /dev/null +++ b/vendor/knative.dev/pkg/resolver/OWNERS @@ -0,0 +1,5 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- n3wscott +- vaikas-google diff --git a/vendor/knative.dev/pkg/source/OWNERS b/vendor/knative.dev/pkg/source/OWNERS new file mode 100644 index 0000000000..8d16912160 --- /dev/null +++ b/vendor/knative.dev/pkg/source/OWNERS @@ -0,0 +1,4 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- source-approvers diff --git a/vendor/knative.dev/pkg/test/OWNERS b/vendor/knative.dev/pkg/test/OWNERS new file mode 100644 index 0000000000..c50adc8493 --- /dev/null +++ b/vendor/knative.dev/pkg/test/OWNERS @@ -0,0 +1,10 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. 
+ +approvers: +- productivity-approvers + +reviewers: +- productivity-reviewers + +labels: +- area/test-and-release diff --git a/vendor/knative.dev/pkg/test/mako/OWNERS b/vendor/knative.dev/pkg/test/mako/OWNERS new file mode 100644 index 0000000000..e3eb2fca5d --- /dev/null +++ b/vendor/knative.dev/pkg/test/mako/OWNERS @@ -0,0 +1,5 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- vagababov + diff --git a/vendor/knative.dev/pkg/tracing/config/OWNERS b/vendor/knative.dev/pkg/tracing/config/OWNERS new file mode 100644 index 0000000000..2480fc6d43 --- /dev/null +++ b/vendor/knative.dev/pkg/tracing/config/OWNERS @@ -0,0 +1,4 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- configmap-approvers diff --git a/vendor/knative.dev/pkg/webhook/OWNERS b/vendor/knative.dev/pkg/webhook/OWNERS new file mode 100644 index 0000000000..001df221fe --- /dev/null +++ b/vendor/knative.dev/pkg/webhook/OWNERS @@ -0,0 +1,7 @@ +# The OWNERS file is used by prow to automatically merge approved PRs. + +approvers: +- webhook-approvers + +reviewers: +- webhook-reviewers diff --git a/vendor/modules.txt b/vendor/modules.txt index 316d8ff086..36b4b3b5b9 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1245,6 +1245,7 @@ knative.dev/pkg/client/injection/kube/informers/apps/v1/statefulset knative.dev/pkg/client/injection/kube/informers/apps/v1/statefulset/fake knative.dev/pkg/client/injection/kube/informers/core/v1/endpoints knative.dev/pkg/client/injection/kube/informers/core/v1/namespace +knative.dev/pkg/client/injection/kube/informers/core/v1/pod knative.dev/pkg/client/injection/kube/informers/core/v1/secret knative.dev/pkg/client/injection/kube/informers/core/v1/secret/fake knative.dev/pkg/client/injection/kube/informers/core/v1/service diff --git a/vendor/sigs.k8s.io/yaml/OWNERS b/vendor/sigs.k8s.io/yaml/OWNERS new file mode 100644 index 0000000000..325b40b076 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/OWNERS @@ -0,0 +1,27 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- dims +- lavalamp +- smarterclayton +- deads2k +- sttts +- liggitt +- caesarxuchao +reviewers: +- dims +- thockin +- lavalamp +- smarterclayton +- wojtek-t +- deads2k +- derekwaynecarr +- caesarxuchao +- mikedanese +- liggitt +- gmarek +- sttts +- ncdc +- tallclair +labels: +- sig/api-machinery From dfcd43d6fe319ae2628edc25ff1c279d0f946924 Mon Sep 17 00:00:00 2001 From: Ahmed Abdalla Date: Fri, 12 Mar 2021 15:29:41 +0100 Subject: [PATCH 08/17] Update deps Signed-off-by: Ahmed Abdalla --- .../google/mako/go/quickstore/BUILD | 24 ------ .../google/mako/internal/go/common/BUILD | 14 --- .../grpc-gateway/internal/BUILD.bazel | 23 ----- .../grpc-gateway/runtime/BUILD.bazel | 85 ------------------- .../grpc-gateway/utilities/BUILD.bazel | 21 ----- .../k8s.io/apimachinery/pkg/api/errors/OWNERS | 23 ----- .../k8s.io/apimachinery/pkg/api/meta/OWNERS | 21 ----- .../apimachinery/pkg/api/resource/OWNERS | 13 --- .../apimachinery/pkg/apis/meta/v1/OWNERS | 31 ------- .../apimachinery/pkg/util/mergepatch/OWNERS | 7 -- .../pkg/util/strategicpatch/OWNERS | 8 -- .../third_party/forked/golang/json/OWNERS | 7 -- .../pkg/apis/clientauthentication/OWNERS | 9 -- .../plugin/pkg/client/auth/gcp/OWNERS | 8 -- .../plugin/pkg/client/auth/oidc/OWNERS | 7 -- vendor/k8s.io/client-go/rest/OWNERS | 25 ------ vendor/k8s.io/client-go/tools/auth/OWNERS | 9 -- vendor/k8s.io/client-go/tools/cache/OWNERS | 43 ---------- .../client-go/tools/leaderelection/OWNERS | 13 --- 
vendor/k8s.io/client-go/tools/metrics/OWNERS | 6 -- vendor/k8s.io/client-go/tools/record/OWNERS | 28 ------ vendor/k8s.io/client-go/transport/OWNERS | 9 -- vendor/k8s.io/client-go/util/cert/OWNERS | 9 -- vendor/k8s.io/client-go/util/keyutil/OWNERS | 7 -- vendor/k8s.io/client-go/util/retry/OWNERS | 4 - vendor/k8s.io/code-generator/OWNERS | 13 --- .../code-generator/cmd/client-gen/OWNERS | 10 --- .../code-generator/cmd/go-to-protobuf/OWNERS | 6 -- vendor/k8s.io/klog/OWNERS | 19 ----- vendor/k8s.io/klog/v2/OWNERS | 19 ----- .../kube-openapi/pkg/generators/rules/OWNERS | 4 - .../k8s.io/kube-openapi/pkg/util/proto/OWNERS | 2 - vendor/k8s.io/utils/pointer/OWNERS | 10 --- .../knative.dev/eventing/pkg/channel/OWNERS | 7 -- vendor/knative.dev/eventing/test/OWNERS | 10 --- .../eventing/test/performance/infra/OWNERS | 10 --- .../test/test_images/performance/OWNERS | 10 --- vendor/knative.dev/hack/OWNERS | 7 -- vendor/knative.dev/hack/OWNERS_ALIASES | 16 ---- vendor/knative.dev/pkg/apis/OWNERS | 7 -- vendor/knative.dev/pkg/apis/duck/OWNERS | 7 -- vendor/knative.dev/pkg/configmap/OWNERS | 4 - vendor/knative.dev/pkg/controller/OWNERS | 7 -- vendor/knative.dev/pkg/hack/OWNERS | 10 --- vendor/knative.dev/pkg/hash/OWNERS | 7 -- vendor/knative.dev/pkg/injection/OWNERS | 5 -- vendor/knative.dev/pkg/kmeta/OWNERS | 4 - vendor/knative.dev/pkg/logging/OWNERS | 4 - vendor/knative.dev/pkg/metrics/OWNERS | 4 - vendor/knative.dev/pkg/network/OWNERS | 5 -- vendor/knative.dev/pkg/reconciler/OWNERS | 7 -- vendor/knative.dev/pkg/resolver/OWNERS | 5 -- vendor/knative.dev/pkg/source/OWNERS | 4 - vendor/knative.dev/pkg/test/OWNERS | 10 --- vendor/knative.dev/pkg/test/mako/OWNERS | 5 -- vendor/knative.dev/pkg/tracing/config/OWNERS | 4 - vendor/knative.dev/pkg/webhook/OWNERS | 7 -- vendor/sigs.k8s.io/yaml/OWNERS | 27 ------ 58 files changed, 730 deletions(-) delete mode 100644 vendor/github.com/google/mako/go/quickstore/BUILD delete mode 100644 vendor/github.com/google/mako/internal/go/common/BUILD delete mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel delete mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel delete mode 100644 vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/mergepatch/OWNERS delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS delete mode 100644 vendor/k8s.io/apimachinery/third_party/forked/golang/json/OWNERS delete mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS delete mode 100644 vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/OWNERS delete mode 100644 vendor/k8s.io/client-go/plugin/pkg/client/auth/oidc/OWNERS delete mode 100644 vendor/k8s.io/client-go/rest/OWNERS delete mode 100644 vendor/k8s.io/client-go/tools/auth/OWNERS delete mode 100644 vendor/k8s.io/client-go/tools/cache/OWNERS delete mode 100644 vendor/k8s.io/client-go/tools/leaderelection/OWNERS delete mode 100644 vendor/k8s.io/client-go/tools/metrics/OWNERS delete mode 100644 vendor/k8s.io/client-go/tools/record/OWNERS delete mode 100644 vendor/k8s.io/client-go/transport/OWNERS delete mode 100644 vendor/k8s.io/client-go/util/cert/OWNERS delete mode 100644 
vendor/k8s.io/client-go/util/keyutil/OWNERS delete mode 100644 vendor/k8s.io/client-go/util/retry/OWNERS delete mode 100644 vendor/k8s.io/code-generator/OWNERS delete mode 100644 vendor/k8s.io/code-generator/cmd/client-gen/OWNERS delete mode 100644 vendor/k8s.io/code-generator/cmd/go-to-protobuf/OWNERS delete mode 100644 vendor/k8s.io/klog/OWNERS delete mode 100644 vendor/k8s.io/klog/v2/OWNERS delete mode 100644 vendor/k8s.io/kube-openapi/pkg/generators/rules/OWNERS delete mode 100644 vendor/k8s.io/kube-openapi/pkg/util/proto/OWNERS delete mode 100644 vendor/k8s.io/utils/pointer/OWNERS delete mode 100644 vendor/knative.dev/eventing/pkg/channel/OWNERS delete mode 100644 vendor/knative.dev/eventing/test/OWNERS delete mode 100644 vendor/knative.dev/eventing/test/performance/infra/OWNERS delete mode 100644 vendor/knative.dev/eventing/test/test_images/performance/OWNERS delete mode 100644 vendor/knative.dev/hack/OWNERS delete mode 100644 vendor/knative.dev/hack/OWNERS_ALIASES delete mode 100644 vendor/knative.dev/pkg/apis/OWNERS delete mode 100644 vendor/knative.dev/pkg/apis/duck/OWNERS delete mode 100644 vendor/knative.dev/pkg/configmap/OWNERS delete mode 100644 vendor/knative.dev/pkg/controller/OWNERS delete mode 100644 vendor/knative.dev/pkg/hack/OWNERS delete mode 100644 vendor/knative.dev/pkg/hash/OWNERS delete mode 100644 vendor/knative.dev/pkg/injection/OWNERS delete mode 100644 vendor/knative.dev/pkg/kmeta/OWNERS delete mode 100644 vendor/knative.dev/pkg/logging/OWNERS delete mode 100644 vendor/knative.dev/pkg/metrics/OWNERS delete mode 100644 vendor/knative.dev/pkg/network/OWNERS delete mode 100644 vendor/knative.dev/pkg/reconciler/OWNERS delete mode 100644 vendor/knative.dev/pkg/resolver/OWNERS delete mode 100644 vendor/knative.dev/pkg/source/OWNERS delete mode 100644 vendor/knative.dev/pkg/test/OWNERS delete mode 100644 vendor/knative.dev/pkg/test/mako/OWNERS delete mode 100644 vendor/knative.dev/pkg/tracing/config/OWNERS delete mode 100644 vendor/knative.dev/pkg/webhook/OWNERS delete mode 100644 vendor/sigs.k8s.io/yaml/OWNERS diff --git a/vendor/github.com/google/mako/go/quickstore/BUILD b/vendor/github.com/google/mako/go/quickstore/BUILD deleted file mode 100644 index 4623b68ead..0000000000 --- a/vendor/github.com/google/mako/go/quickstore/BUILD +++ /dev/null @@ -1,24 +0,0 @@ -# Quickstore library for Go. -# See quickstore.go and go/mako-quickstore for documentation. -# See quickstore_example_test.go for example usage. 
-package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "quickstore", - srcs = ["quickstore.go"], - importpath = "github.com/google/mako/go/quickstore", - deps = [ - "//internal/go/common", - "//internal/quickstore_microservice/proto:quickstore_go_grpc_pb", - "//internal/quickstore_microservice/proto:quickstore_go_proto", - "//proto/quickstore:quickstore_go_proto", - "//spec/proto:mako_go_proto", - "@com_github_golang_glog//:go_default_library", - "@com_github_golang_protobuf//proto:go_default_library", - "@org_golang_google_grpc//:go_default_library", - ], -) diff --git a/vendor/github.com/google/mako/internal/go/common/BUILD b/vendor/github.com/google/mako/internal/go/common/BUILD deleted file mode 100644 index 1f90d9eda1..0000000000 --- a/vendor/github.com/google/mako/internal/go/common/BUILD +++ /dev/null @@ -1,14 +0,0 @@ -# Go libraries for mako -package(default_visibility = ["//:internal"]) - -licenses(["notice"]) - -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "common", - srcs = [ - "common_deps.go", - ], - importpath = "github.com/google/mako/internal/go/common", -) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel deleted file mode 100644 index 5242751fb2..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@rules_proto//proto:defs.bzl", "proto_library") -load("@io_bazel_rules_go//go:def.bzl", "go_library") -load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") - -package(default_visibility = ["//visibility:public"]) - -proto_library( - name = "internal_proto", - srcs = ["errors.proto"], - deps = ["@com_google_protobuf//:any_proto"], -) - -go_proto_library( - name = "internal_go_proto", - importpath = "github.com/grpc-ecosystem/grpc-gateway/internal", - proto = ":internal_proto", -) - -go_library( - name = "go_default_library", - embed = [":internal_go_proto"], - importpath = "github.com/grpc-ecosystem/grpc-gateway/internal", -) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel deleted file mode 100644 index 58b72b9cf7..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel +++ /dev/null @@ -1,85 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -package(default_visibility = ["//visibility:public"]) - -go_library( - name = "go_default_library", - srcs = [ - "context.go", - "convert.go", - "doc.go", - "errors.go", - "fieldmask.go", - "handler.go", - "marshal_httpbodyproto.go", - "marshal_json.go", - "marshal_jsonpb.go", - "marshal_proto.go", - "marshaler.go", - "marshaler_registry.go", - "mux.go", - "pattern.go", - "proto2_convert.go", - "proto_errors.go", - "query.go", - ], - importpath = "github.com/grpc-ecosystem/grpc-gateway/runtime", - deps = [ - "//internal:go_default_library", - "//utilities:go_default_library", - "@com_github_golang_protobuf//descriptor:go_default_library_gen", - "@com_github_golang_protobuf//jsonpb:go_default_library_gen", - "@com_github_golang_protobuf//proto:go_default_library", - "@go_googleapis//google/api:httpbody_go_proto", - "@io_bazel_rules_go//proto/wkt:any_go_proto", - "@io_bazel_rules_go//proto/wkt:descriptor_go_proto", - "@io_bazel_rules_go//proto/wkt:duration_go_proto", - 
"@io_bazel_rules_go//proto/wkt:field_mask_go_proto", - "@io_bazel_rules_go//proto/wkt:timestamp_go_proto", - "@io_bazel_rules_go//proto/wkt:wrappers_go_proto", - "@org_golang_google_grpc//codes:go_default_library", - "@org_golang_google_grpc//grpclog:go_default_library", - "@org_golang_google_grpc//metadata:go_default_library", - "@org_golang_google_grpc//status:go_default_library", - ], -) - -go_test( - name = "go_default_test", - size = "small", - srcs = [ - "context_test.go", - "convert_test.go", - "errors_test.go", - "fieldmask_test.go", - "handler_test.go", - "marshal_httpbodyproto_test.go", - "marshal_json_test.go", - "marshal_jsonpb_test.go", - "marshal_proto_test.go", - "marshaler_registry_test.go", - "mux_test.go", - "pattern_test.go", - "query_test.go", - ], - embed = [":go_default_library"], - deps = [ - "//internal:go_default_library", - "//runtime/internal/examplepb:go_default_library", - "//utilities:go_default_library", - "@com_github_golang_protobuf//jsonpb:go_default_library_gen", - "@com_github_golang_protobuf//proto:go_default_library", - "@com_github_golang_protobuf//ptypes:go_default_library_gen", - "@go_googleapis//google/api:httpbody_go_proto", - "@go_googleapis//google/rpc:errdetails_go_proto", - "@io_bazel_rules_go//proto/wkt:duration_go_proto", - "@io_bazel_rules_go//proto/wkt:empty_go_proto", - "@io_bazel_rules_go//proto/wkt:field_mask_go_proto", - "@io_bazel_rules_go//proto/wkt:struct_go_proto", - "@io_bazel_rules_go//proto/wkt:timestamp_go_proto", - "@io_bazel_rules_go//proto/wkt:wrappers_go_proto", - "@org_golang_google_grpc//codes:go_default_library", - "@org_golang_google_grpc//metadata:go_default_library", - "@org_golang_google_grpc//status:go_default_library", - ], -) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel deleted file mode 100644 index 7109d79323..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel +++ /dev/null @@ -1,21 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -package(default_visibility = ["//visibility:public"]) - -go_library( - name = "go_default_library", - srcs = [ - "doc.go", - "pattern.go", - "readerfactory.go", - "trie.go", - ], - importpath = "github.com/grpc-ecosystem/grpc-gateway/utilities", -) - -go_test( - name = "go_default_test", - size = "small", - srcs = ["trie_test.go"], - embed = [":go_default_library"], -) diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS deleted file mode 100644 index d18a17885b..0000000000 --- a/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS +++ /dev/null @@ -1,23 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -reviewers: -- thockin -- lavalamp -- smarterclayton -- wojtek-t -- deads2k -- brendandburns -- derekwaynecarr -- caesarxuchao -- mikedanese -- liggitt -- nikhiljindal -- gmarek -- erictune -- saad-ali -- janetkuo -- tallclair -- dims -- hongchaodeng -- krousey -- cjcullen diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS deleted file mode 100644 index 68b8d353ca..0000000000 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS +++ /dev/null @@ -1,21 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -reviewers: -- thockin -- smarterclayton -- wojtek-t -- deads2k -- brendandburns -- derekwaynecarr -- caesarxuchao -- mikedanese -- liggitt -- nikhiljindal -- gmarek -- janetkuo -- ncdc -- dims 
-- krousey -- resouer -- mfojtik -- jianhuiz diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS deleted file mode 100644 index 7ac0fe11a1..0000000000 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS +++ /dev/null @@ -1,13 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -reviewers: -- thockin -- lavalamp -- smarterclayton -- wojtek-t -- derekwaynecarr -- mikedanese -- saad-ali -- janetkuo -- xiang90 -- mbohlool diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS deleted file mode 100644 index 15b4c875a3..0000000000 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS +++ /dev/null @@ -1,31 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -reviewers: -- thockin -- smarterclayton -- wojtek-t -- deads2k -- brendandburns -- caesarxuchao -- liggitt -- nikhiljindal -- gmarek -- erictune -- davidopp -- sttts -- quinton-hoole -- luxas -- janetkuo -- justinsb -- ncdc -- soltysh -- dims -- madhusudancs -- hongchaodeng -- krousey -- mml -- mbohlool -- therc -- mqliang -- kevin-wangzefeng -- jianhuiz diff --git a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/OWNERS b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/OWNERS deleted file mode 100644 index 3f72c69ba3..0000000000 --- a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/OWNERS +++ /dev/null @@ -1,7 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -approvers: -- pwittrock -reviewers: -- mengqiy -- apelisse diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS deleted file mode 100644 index cfee199fa0..0000000000 --- a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS +++ /dev/null @@ -1,8 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -approvers: -- pwittrock -- mengqiy -reviewers: -- mengqiy -- apelisse diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/json/OWNERS b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/OWNERS deleted file mode 100644 index 3f72c69ba3..0000000000 --- a/vendor/k8s.io/apimachinery/third_party/forked/golang/json/OWNERS +++ /dev/null @@ -1,7 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -approvers: -- pwittrock -reviewers: -- mengqiy -- apelisse diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS deleted file mode 100644 index e0ec62deb2..0000000000 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS +++ /dev/null @@ -1,9 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -# approval on api packages bubbles to api-approvers -reviewers: -- sig-auth-authenticators-approvers -- sig-auth-authenticators-reviewers -labels: -- sig/auth - diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/OWNERS b/vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/OWNERS deleted file mode 100644 index 97fcd3dd30..0000000000 --- a/vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/OWNERS +++ /dev/null @@ -1,8 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -approvers: -- cjcullen -- jlowdermilk -reviewers: -- cjcullen -- jlowdermilk diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/oidc/OWNERS b/vendor/k8s.io/client-go/plugin/pkg/client/auth/oidc/OWNERS deleted file mode 100644 index 03fb44ea24..0000000000 --- a/vendor/k8s.io/client-go/plugin/pkg/client/auth/oidc/OWNERS +++ /dev/null @@ -1,7 
+0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -approvers: -- ericchiang -reviewers: -- ericchiang -- rithujohn191 diff --git a/vendor/k8s.io/client-go/rest/OWNERS b/vendor/k8s.io/client-go/rest/OWNERS deleted file mode 100644 index c02ec6a250..0000000000 --- a/vendor/k8s.io/client-go/rest/OWNERS +++ /dev/null @@ -1,25 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -reviewers: -- thockin -- smarterclayton -- caesarxuchao -- wojtek-t -- deads2k -- brendandburns -- liggitt -- nikhiljindal -- gmarek -- erictune -- sttts -- luxas -- dims -- errordeveloper -- hongchaodeng -- krousey -- resouer -- cjcullen -- rmmh -- asalkeld -- juanvallejo -- lojies diff --git a/vendor/k8s.io/client-go/tools/auth/OWNERS b/vendor/k8s.io/client-go/tools/auth/OWNERS deleted file mode 100644 index 3e05d309be..0000000000 --- a/vendor/k8s.io/client-go/tools/auth/OWNERS +++ /dev/null @@ -1,9 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -approvers: -- sig-auth-authenticators-approvers -reviewers: -- sig-auth-authenticators-reviewers -labels: -- sig/auth - diff --git a/vendor/k8s.io/client-go/tools/cache/OWNERS b/vendor/k8s.io/client-go/tools/cache/OWNERS deleted file mode 100644 index 7bbe635426..0000000000 --- a/vendor/k8s.io/client-go/tools/cache/OWNERS +++ /dev/null @@ -1,43 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -approvers: -- thockin -- lavalamp -- smarterclayton -- wojtek-t -- deads2k -- caesarxuchao -- liggitt -- ncdc -reviewers: -- thockin -- lavalamp -- smarterclayton -- wojtek-t -- deads2k -- brendandburns -- derekwaynecarr -- caesarxuchao -- mikedanese -- liggitt -- nikhiljindal -- erictune -- davidopp -- pmorie -- janetkuo -- justinsb -- soltysh -- jsafrane -- dims -- madhusudancs -- hongchaodeng -- krousey -- xiang90 -- mml -- ingvagabund -- resouer -- jessfraz -- mfojtik -- mqliang -- sdminonne -- ncdc diff --git a/vendor/k8s.io/client-go/tools/leaderelection/OWNERS b/vendor/k8s.io/client-go/tools/leaderelection/OWNERS deleted file mode 100644 index 9ece5e1ea4..0000000000 --- a/vendor/k8s.io/client-go/tools/leaderelection/OWNERS +++ /dev/null @@ -1,13 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -approvers: -- mikedanese -- timothysc -reviewers: -- wojtek-t -- deads2k -- mikedanese -- gmarek -- timothysc -- ingvagabund -- resouer diff --git a/vendor/k8s.io/client-go/tools/metrics/OWNERS b/vendor/k8s.io/client-go/tools/metrics/OWNERS deleted file mode 100644 index 77bcb5090c..0000000000 --- a/vendor/k8s.io/client-go/tools/metrics/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -reviewers: -- wojtek-t -- krousey -- jayunit100 diff --git a/vendor/k8s.io/client-go/tools/record/OWNERS b/vendor/k8s.io/client-go/tools/record/OWNERS deleted file mode 100644 index 792f356b0d..0000000000 --- a/vendor/k8s.io/client-go/tools/record/OWNERS +++ /dev/null @@ -1,28 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -reviewers: -- lavalamp -- smarterclayton -- wojtek-t -- deads2k -- derekwaynecarr -- caesarxuchao -- vishh -- mikedanese -- liggitt -- nikhiljindal -- erictune -- pmorie -- dchen1107 -- saad-ali -- luxas -- yifan-gu -- mwielgus -- timothysc -- jsafrane -- dims -- krousey -- a-robinson -- aveshagarwal -- resouer -- cjcullen diff --git a/vendor/k8s.io/client-go/transport/OWNERS b/vendor/k8s.io/client-go/transport/OWNERS deleted file mode 100644 index a52176903c..0000000000 --- a/vendor/k8s.io/client-go/transport/OWNERS +++ /dev/null @@ -1,9 +0,0 @@ -# See the OWNERS docs at 
https://go.k8s.io/owners - -reviewers: -- smarterclayton -- wojtek-t -- deads2k -- liggitt -- krousey -- caesarxuchao diff --git a/vendor/k8s.io/client-go/util/cert/OWNERS b/vendor/k8s.io/client-go/util/cert/OWNERS deleted file mode 100644 index 3cf0364383..0000000000 --- a/vendor/k8s.io/client-go/util/cert/OWNERS +++ /dev/null @@ -1,9 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -approvers: -- sig-auth-certificates-approvers -reviewers: -- sig-auth-certificates-reviewers -labels: -- sig/auth - diff --git a/vendor/k8s.io/client-go/util/keyutil/OWNERS b/vendor/k8s.io/client-go/util/keyutil/OWNERS deleted file mode 100644 index 470b7a1c92..0000000000 --- a/vendor/k8s.io/client-go/util/keyutil/OWNERS +++ /dev/null @@ -1,7 +0,0 @@ -approvers: -- sig-auth-certificates-approvers -reviewers: -- sig-auth-certificates-reviewers -labels: -- sig/auth - diff --git a/vendor/k8s.io/client-go/util/retry/OWNERS b/vendor/k8s.io/client-go/util/retry/OWNERS deleted file mode 100644 index dec3e88d63..0000000000 --- a/vendor/k8s.io/client-go/util/retry/OWNERS +++ /dev/null @@ -1,4 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -reviewers: -- caesarxuchao diff --git a/vendor/k8s.io/code-generator/OWNERS b/vendor/k8s.io/code-generator/OWNERS deleted file mode 100644 index 6f7abe3edb..0000000000 --- a/vendor/k8s.io/code-generator/OWNERS +++ /dev/null @@ -1,13 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -approvers: -- lavalamp -- wojtek-t -- sttts -reviewers: -- lavalamp -- wojtek-t -- sttts -labels: -- sig/api-machinery -- area/code-generation diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/OWNERS b/vendor/k8s.io/code-generator/cmd/client-gen/OWNERS deleted file mode 100644 index 62866d0b19..0000000000 --- a/vendor/k8s.io/code-generator/cmd/client-gen/OWNERS +++ /dev/null @@ -1,10 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -approvers: -- lavalamp -- wojtek-t -- caesarxuchao -reviewers: -- lavalamp -- wojtek-t -- caesarxuchao diff --git a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/OWNERS b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/OWNERS deleted file mode 100644 index 613659162a..0000000000 --- a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/OWNERS +++ /dev/null @@ -1,6 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -approvers: -- smarterclayton -reviewers: -- smarterclayton diff --git a/vendor/k8s.io/klog/OWNERS b/vendor/k8s.io/klog/OWNERS deleted file mode 100644 index 380e514f28..0000000000 --- a/vendor/k8s.io/klog/OWNERS +++ /dev/null @@ -1,19 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners -reviewers: - - jayunit100 - - hoegaarden - - andyxning - - neolit123 - - pohly - - yagonobre - - vincepri - - detiber -approvers: - - dims - - thockin - - justinsb - - tallclair - - piosz - - brancz - - DirectXMan12 - - lavalamp diff --git a/vendor/k8s.io/klog/v2/OWNERS b/vendor/k8s.io/klog/v2/OWNERS deleted file mode 100644 index 380e514f28..0000000000 --- a/vendor/k8s.io/klog/v2/OWNERS +++ /dev/null @@ -1,19 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners -reviewers: - - jayunit100 - - hoegaarden - - andyxning - - neolit123 - - pohly - - yagonobre - - vincepri - - detiber -approvers: - - dims - - thockin - - justinsb - - tallclair - - piosz - - brancz - - DirectXMan12 - - lavalamp diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/rules/OWNERS b/vendor/k8s.io/kube-openapi/pkg/generators/rules/OWNERS deleted file mode 100644 index 235bc545b8..0000000000 --- 
a/vendor/k8s.io/kube-openapi/pkg/generators/rules/OWNERS +++ /dev/null @@ -1,4 +0,0 @@ -reviewers: -- roycaihw -approvers: -- roycaihw diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/OWNERS b/vendor/k8s.io/kube-openapi/pkg/util/proto/OWNERS deleted file mode 100644 index 9621a6a3a4..0000000000 --- a/vendor/k8s.io/kube-openapi/pkg/util/proto/OWNERS +++ /dev/null @@ -1,2 +0,0 @@ -approvers: -- apelisse diff --git a/vendor/k8s.io/utils/pointer/OWNERS b/vendor/k8s.io/utils/pointer/OWNERS deleted file mode 100644 index 0d6392752a..0000000000 --- a/vendor/k8s.io/utils/pointer/OWNERS +++ /dev/null @@ -1,10 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -approvers: -- apelisse -- stewart-yu -- thockin -reviewers: -- apelisse -- stewart-yu -- thockin diff --git a/vendor/knative.dev/eventing/pkg/channel/OWNERS b/vendor/knative.dev/eventing/pkg/channel/OWNERS deleted file mode 100644 index 05ce860f94..0000000000 --- a/vendor/knative.dev/eventing/pkg/channel/OWNERS +++ /dev/null @@ -1,7 +0,0 @@ -# The OWNERS file is used by prow to automatically merge approved PRs. - -reviewers: -- channel-reviewers - -labels: -- area/pkg-channel diff --git a/vendor/knative.dev/eventing/test/OWNERS b/vendor/knative.dev/eventing/test/OWNERS deleted file mode 100644 index c50adc8493..0000000000 --- a/vendor/knative.dev/eventing/test/OWNERS +++ /dev/null @@ -1,10 +0,0 @@ -# The OWNERS file is used by prow to automatically merge approved PRs. - -approvers: -- productivity-approvers - -reviewers: -- productivity-reviewers - -labels: -- area/test-and-release diff --git a/vendor/knative.dev/eventing/test/performance/infra/OWNERS b/vendor/knative.dev/eventing/test/performance/infra/OWNERS deleted file mode 100644 index 0cfccadd87..0000000000 --- a/vendor/knative.dev/eventing/test/performance/infra/OWNERS +++ /dev/null @@ -1,10 +0,0 @@ -# The OWNERS file is used by prow to automatically merge approved PRs. - -approvers: -- performance-approvers - -reviewers: -- performance-reviewers - -labels: -- area/performance diff --git a/vendor/knative.dev/eventing/test/test_images/performance/OWNERS b/vendor/knative.dev/eventing/test/test_images/performance/OWNERS deleted file mode 100644 index 0cfccadd87..0000000000 --- a/vendor/knative.dev/eventing/test/test_images/performance/OWNERS +++ /dev/null @@ -1,10 +0,0 @@ -# The OWNERS file is used by prow to automatically merge approved PRs. 
- -approvers: -- performance-approvers - -reviewers: -- performance-reviewers - -labels: -- area/performance diff --git a/vendor/knative.dev/hack/OWNERS b/vendor/knative.dev/hack/OWNERS deleted file mode 100644 index d76f0af39e..0000000000 --- a/vendor/knative.dev/hack/OWNERS +++ /dev/null @@ -1,7 +0,0 @@ -approvers: - - technical-oversight-committee - - productivity-approvers - -reviewers: - - productivity-approvers - - productivity-reviewers diff --git a/vendor/knative.dev/hack/OWNERS_ALIASES b/vendor/knative.dev/hack/OWNERS_ALIASES deleted file mode 100644 index 84bbd0d3b7..0000000000 --- a/vendor/knative.dev/hack/OWNERS_ALIASES +++ /dev/null @@ -1,16 +0,0 @@ -aliases: - technical-oversight-committee: - - evankanderson - - grantr - - markusthoemmes - - mattmoor - - tcnghia - productivity-approvers: - - chaodaiG - - chizhg - - coryrc - - n3wscott - productivity-reviewers: - - steuhs - - peterfeifanchen - - efiturri diff --git a/vendor/knative.dev/pkg/apis/OWNERS b/vendor/knative.dev/pkg/apis/OWNERS deleted file mode 100644 index 1227b2b9e8..0000000000 --- a/vendor/knative.dev/pkg/apis/OWNERS +++ /dev/null @@ -1,7 +0,0 @@ -# The OWNERS file is used by prow to automatically merge approved PRs. - -approvers: -- apis-approvers - -reviewers: -- apis-reviewers diff --git a/vendor/knative.dev/pkg/apis/duck/OWNERS b/vendor/knative.dev/pkg/apis/duck/OWNERS deleted file mode 100644 index 8df611ba00..0000000000 --- a/vendor/knative.dev/pkg/apis/duck/OWNERS +++ /dev/null @@ -1,7 +0,0 @@ -# The OWNERS file is used by prow to automatically merge approved PRs. - -approvers: -- apis-duck-approvers - -reviewers: -- apis-duck-reviewers diff --git a/vendor/knative.dev/pkg/configmap/OWNERS b/vendor/knative.dev/pkg/configmap/OWNERS deleted file mode 100644 index 2480fc6d43..0000000000 --- a/vendor/knative.dev/pkg/configmap/OWNERS +++ /dev/null @@ -1,4 +0,0 @@ -# The OWNERS file is used by prow to automatically merge approved PRs. - -approvers: -- configmap-approvers diff --git a/vendor/knative.dev/pkg/controller/OWNERS b/vendor/knative.dev/pkg/controller/OWNERS deleted file mode 100644 index 0b270d53af..0000000000 --- a/vendor/knative.dev/pkg/controller/OWNERS +++ /dev/null @@ -1,7 +0,0 @@ -# The OWNERS file is used by prow to automatically merge approved PRs. - -approvers: -- controller-approvers - -reviewers: -- controller-reviewers diff --git a/vendor/knative.dev/pkg/hack/OWNERS b/vendor/knative.dev/pkg/hack/OWNERS deleted file mode 100644 index c50adc8493..0000000000 --- a/vendor/knative.dev/pkg/hack/OWNERS +++ /dev/null @@ -1,10 +0,0 @@ -# The OWNERS file is used by prow to automatically merge approved PRs. - -approvers: -- productivity-approvers - -reviewers: -- productivity-reviewers - -labels: -- area/test-and-release diff --git a/vendor/knative.dev/pkg/hash/OWNERS b/vendor/knative.dev/pkg/hash/OWNERS deleted file mode 100644 index 0b270d53af..0000000000 --- a/vendor/knative.dev/pkg/hash/OWNERS +++ /dev/null @@ -1,7 +0,0 @@ -# The OWNERS file is used by prow to automatically merge approved PRs. - -approvers: -- controller-approvers - -reviewers: -- controller-reviewers diff --git a/vendor/knative.dev/pkg/injection/OWNERS b/vendor/knative.dev/pkg/injection/OWNERS deleted file mode 100644 index dda47512a4..0000000000 --- a/vendor/knative.dev/pkg/injection/OWNERS +++ /dev/null @@ -1,5 +0,0 @@ -# The OWNERS file is used by prow to automatically merge approved PRs. 
- -approvers: -- mattmoor -- n3wscott diff --git a/vendor/knative.dev/pkg/kmeta/OWNERS b/vendor/knative.dev/pkg/kmeta/OWNERS deleted file mode 100644 index 29b0d9f256..0000000000 --- a/vendor/knative.dev/pkg/kmeta/OWNERS +++ /dev/null @@ -1,4 +0,0 @@ -# The OWNERS file is used by prow to automatically merge approved PRs. - -approvers: -- kmeta-approvers diff --git a/vendor/knative.dev/pkg/logging/OWNERS b/vendor/knative.dev/pkg/logging/OWNERS deleted file mode 100644 index fa4854ba0a..0000000000 --- a/vendor/knative.dev/pkg/logging/OWNERS +++ /dev/null @@ -1,4 +0,0 @@ -# The OWNERS file is used by prow to automatically merge approved PRs. - -approvers: -- logging-approvers diff --git a/vendor/knative.dev/pkg/metrics/OWNERS b/vendor/knative.dev/pkg/metrics/OWNERS deleted file mode 100644 index 6d3966df44..0000000000 --- a/vendor/knative.dev/pkg/metrics/OWNERS +++ /dev/null @@ -1,4 +0,0 @@ -# The OWNERS file is used by prow to automatically merge approved PRs. - -approvers: -- metrics-approvers diff --git a/vendor/knative.dev/pkg/network/OWNERS b/vendor/knative.dev/pkg/network/OWNERS deleted file mode 100644 index b14ac5308e..0000000000 --- a/vendor/knative.dev/pkg/network/OWNERS +++ /dev/null @@ -1,5 +0,0 @@ -# The OWNERS file is used by prow to automatically merge approved PRs. - -approvers: -- tcnghia -- vagababov diff --git a/vendor/knative.dev/pkg/reconciler/OWNERS b/vendor/knative.dev/pkg/reconciler/OWNERS deleted file mode 100644 index 0b270d53af..0000000000 --- a/vendor/knative.dev/pkg/reconciler/OWNERS +++ /dev/null @@ -1,7 +0,0 @@ -# The OWNERS file is used by prow to automatically merge approved PRs. - -approvers: -- controller-approvers - -reviewers: -- controller-reviewers diff --git a/vendor/knative.dev/pkg/resolver/OWNERS b/vendor/knative.dev/pkg/resolver/OWNERS deleted file mode 100644 index acf2ee2c1c..0000000000 --- a/vendor/knative.dev/pkg/resolver/OWNERS +++ /dev/null @@ -1,5 +0,0 @@ -# The OWNERS file is used by prow to automatically merge approved PRs. - -approvers: -- n3wscott -- vaikas-google diff --git a/vendor/knative.dev/pkg/source/OWNERS b/vendor/knative.dev/pkg/source/OWNERS deleted file mode 100644 index 8d16912160..0000000000 --- a/vendor/knative.dev/pkg/source/OWNERS +++ /dev/null @@ -1,4 +0,0 @@ -# The OWNERS file is used by prow to automatically merge approved PRs. - -approvers: -- source-approvers diff --git a/vendor/knative.dev/pkg/test/OWNERS b/vendor/knative.dev/pkg/test/OWNERS deleted file mode 100644 index c50adc8493..0000000000 --- a/vendor/knative.dev/pkg/test/OWNERS +++ /dev/null @@ -1,10 +0,0 @@ -# The OWNERS file is used by prow to automatically merge approved PRs. - -approvers: -- productivity-approvers - -reviewers: -- productivity-reviewers - -labels: -- area/test-and-release diff --git a/vendor/knative.dev/pkg/test/mako/OWNERS b/vendor/knative.dev/pkg/test/mako/OWNERS deleted file mode 100644 index e3eb2fca5d..0000000000 --- a/vendor/knative.dev/pkg/test/mako/OWNERS +++ /dev/null @@ -1,5 +0,0 @@ -# The OWNERS file is used by prow to automatically merge approved PRs. - -approvers: -- vagababov - diff --git a/vendor/knative.dev/pkg/tracing/config/OWNERS b/vendor/knative.dev/pkg/tracing/config/OWNERS deleted file mode 100644 index 2480fc6d43..0000000000 --- a/vendor/knative.dev/pkg/tracing/config/OWNERS +++ /dev/null @@ -1,4 +0,0 @@ -# The OWNERS file is used by prow to automatically merge approved PRs. 
- -approvers: -- configmap-approvers diff --git a/vendor/knative.dev/pkg/webhook/OWNERS b/vendor/knative.dev/pkg/webhook/OWNERS deleted file mode 100644 index 001df221fe..0000000000 --- a/vendor/knative.dev/pkg/webhook/OWNERS +++ /dev/null @@ -1,7 +0,0 @@ -# The OWNERS file is used by prow to automatically merge approved PRs. - -approvers: -- webhook-approvers - -reviewers: -- webhook-reviewers diff --git a/vendor/sigs.k8s.io/yaml/OWNERS b/vendor/sigs.k8s.io/yaml/OWNERS deleted file mode 100644 index 325b40b076..0000000000 --- a/vendor/sigs.k8s.io/yaml/OWNERS +++ /dev/null @@ -1,27 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -approvers: -- dims -- lavalamp -- smarterclayton -- deads2k -- sttts -- liggitt -- caesarxuchao -reviewers: -- dims -- thockin -- lavalamp -- smarterclayton -- wojtek-t -- deads2k -- derekwaynecarr -- caesarxuchao -- mikedanese -- liggitt -- gmarek -- sttts -- ncdc -- tallclair -labels: -- sig/api-machinery From b563a2210aaa54e350a9f9f0e5b68fa345beeae7 Mon Sep 17 00:00:00 2001 From: Ahmed Abdalla Date: Fri, 12 Mar 2021 15:41:17 +0100 Subject: [PATCH 09/17] Fix consumer handler test Signed-off-by: Ahmed Abdalla --- pkg/common/consumer/consumer_handler_test.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pkg/common/consumer/consumer_handler_test.go b/pkg/common/consumer/consumer_handler_test.go index 14953e6c1a..4d8fdac959 100644 --- a/pkg/common/consumer/consumer_handler_test.go +++ b/pkg/common/consumer/consumer_handler_test.go @@ -117,6 +117,10 @@ func (m mockMessageHandler) Handle(ctx context.Context, message *sarama.Consumer func (m mockMessageHandler) SetReady(ready bool) { } +func (m mockMessageHandler) GetConsumerGroup() string { + return "consumer group" +} + //------ Tests func Test(t *testing.T) { From 6aadc0b57df41abd9d13949bac9e47cecc0c0016 Mon Sep 17 00:00:00 2001 From: Ahmed Abdalla Date: Fri, 12 Mar 2021 17:22:39 +0100 Subject: [PATCH 10/17] Remove unused hashes from status probing test Signed-off-by: Ahmed Abdalla --- pkg/channel/consolidated/status/status_test.go | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/pkg/channel/consolidated/status/status_test.go b/pkg/channel/consolidated/status/status_test.go index 08f213ee9b..f750bd7b41 100644 --- a/pkg/channel/consolidated/status/status_test.go +++ b/pkg/channel/consolidated/status/status_test.go @@ -61,7 +61,7 @@ var ( const dispatcherReadySubHeader = "K-Subscriber-Status" func getURL(s string) *apis.URL { - u, _ := apis.ParseURL("http://subscr.ns.local") + u, _ := apis.ParseURL(s) return u } @@ -93,19 +93,12 @@ type ReadyPair struct { s eventingduckv1.SubscriberSpec } -const HashHeaderName = "K-Network-Hash" - func TestProbeSinglePod(t *testing.T) { var succeed atomic.Bool ch := channelTemplate.DeepCopy() sub := subscriptionTemplate.DeepCopy() - hash, err := computeHash(*sub.DeepCopy()) - if err != nil { - t.Fatal("Failed to compute hash:", err) - } - probeHandler := http.HandlerFunc(handleProbe(t)) // Probes only succeed if succeed is true @@ -117,8 +110,6 @@ func TestProbeSinglePod(t *testing.T) { return } - // TODO Move const to dispatcher - r.Header.Set(HashHeaderName, fmt.Sprintf("%x", hash)) probeHandler.ServeHTTP(w, r) }) From 083860ebffec73376ba5b8b2ab61dcabc75ecea9 Mon Sep 17 00:00:00 2001 From: Ahmed Abdalla Date: Tue, 16 Mar 2021 23:41:20 +0100 Subject: [PATCH 11/17] Apply review comments and add a prober test Signed-off-by: Ahmed Abdalla --- .../reconciler/controller/controller.go | 4 +- .../reconciler/controller/kafkachannel.go | 5 +-
pkg/channel/consolidated/status/status.go | 51 +++++-------------- .../consolidated/status/status_test.go | 28 +++++++++- 4 files changed, 47 insertions(+), 41 deletions(-) diff --git a/pkg/channel/consolidated/reconciler/controller/controller.go b/pkg/channel/consolidated/reconciler/controller/controller.go index 55f4ed197e..32b10cb911 100644 --- a/pkg/channel/consolidated/reconciler/controller/controller.go +++ b/pkg/channel/consolidated/reconciler/controller/controller.go @@ -156,10 +156,10 @@ func NewController( // Cancel probing when a Pod is deleted DeleteFunc: func(obj interface{}) { pod, ok := obj.(*corev1.Pod) - if ok { + if ok && pod != nil { logger.Debugw("Dispatcher pod deleted. Canceling pod probing.", zap.String("pod", pod.GetName())) - statusProber.CancelPodProbing(pod) + statusProber.CancelPodProbing(*pod) impl.GlobalResync(kafkaChannelInformer.Informer()) } }, diff --git a/pkg/channel/consolidated/reconciler/controller/kafkachannel.go b/pkg/channel/consolidated/reconciler/controller/kafkachannel.go index 59ad71b601..ff43bd6698 100644 --- a/pkg/channel/consolidated/reconciler/controller/kafkachannel.go +++ b/pkg/channel/consolidated/reconciler/controller/kafkachannel.go @@ -149,7 +149,6 @@ func (r *Reconciler) ReconcileKind(ctx context.Context, kc *v1beta1.KafkaChannel logger.Errorw("Invalid kafka channel", zap.String("channel", kc.Name), zap.Error(err)) return err } - if r.kafkaConfig == nil { if r.kafkaConfigError == nil { r.kafkaConfigError = fmt.Errorf("the config map '%s' does not exist", constants.SettingsConfigMapName) @@ -586,5 +585,9 @@ func (r *Reconciler) FinalizeKind(ctx context.Context, kc *v1beta1.KafkaChannel) //if r.consumerGroupWatcher != nil { // r.consumerGroupWatcher.Forget(string(kc.ObjectMeta.UID)) //} + for _, s := range kc.Spec.Subscribers { + r.statusManager.CancelProbing(s) + } + return newReconciledNormal(kc.Namespace, kc.Name) //ok to remove finalizer } diff --git a/pkg/channel/consolidated/status/status.go b/pkg/channel/consolidated/status/status.go index 18b3be9ab4..bb1d53e715 100644 --- a/pkg/channel/consolidated/status/status.go +++ b/pkg/channel/consolidated/status/status.go @@ -19,7 +19,6 @@ package status import ( "context" "crypto/sha256" - "crypto/tls" "encoding/json" "fmt" "net" @@ -58,9 +57,8 @@ var dialContext = (&net.Dialer{Timeout: probeTimeout}).DialContext // targetState represents the probing state of a subscription type targetState struct { - hash string - sub eventingduckv1.SubscriberSpec - ch messagingv1beta1.KafkaChannel + sub eventingduckv1.SubscriberSpec + ch messagingv1beta1.KafkaChannel // pendingCount is the number of pods that haven't been successfully probed yet pendingCount atomic.Int32 @@ -113,6 +111,8 @@ type ProbeTargetLister interface { // Manager provides a way to check if an Ingress is ready type Manager interface { IsReady(ctx context.Context, ch messagingv1beta1.KafkaChannel, sub eventingduckv1.SubscriberSpec) (bool, error) + CancelProbing(sub eventingduckv1.SubscriberSpec) + CancelPodProbing(pod corev1.Pod) } // Prober provides a way to check if a VirtualService is ready by probing the Envoy pods @@ -187,19 +187,13 @@ func (m *Prober) IsReady(ctx context.Context, ch messagingv1beta1.KafkaChannel, subscriptionKey := sub.UID logger := logging.FromContext(ctx) - bytes, err := computeHash(sub) - if err != nil { - return false, fmt.Errorf("failed to compute the hash of the Subscription: %w", err) - } - hash := fmt.Sprintf("%x", bytes) - if ready, ok := func() (bool, bool) { m.mu.Lock() defer m.mu.Unlock() if 
state, ok := m.targetStates[subscriptionKey]; ok { - if state.hash == hash { + if state.sub.Generation == sub.Generation { state.lastAccessed = time.Now() - logger.Debugw("Subscription is hashed. Checking readiness", + logger.Debugw("Subscription is cached. Checking readiness", zap.Any("subscription", sub.UID)) return m.checkReadiness(state), true } @@ -215,7 +209,6 @@ func (m *Prober) IsReady(ctx context.Context, ch messagingv1beta1.KafkaChannel, subCtx, cancel := context.WithCancel(context.Background()) subscriptionState := &targetState{ - hash: hash, sub: sub, ch: ch, lastAccessed: time.Now(), @@ -350,17 +343,13 @@ func (m *Prober) CancelProbing(sub eventingduckv1.SubscriberSpec) { } // CancelPodProbing cancels probing of the provided Pod IP. -// -// TODO(#6269): make this cancellation based on Pod x port instead of just Pod. -func (m *Prober) CancelPodProbing(obj interface{}) { - if pod, ok := obj.(*corev1.Pod); ok { - m.mu.Lock() - defer m.mu.Unlock() +func (m *Prober) CancelPodProbing(pod corev1.Pod) { + m.mu.Lock() + defer m.mu.Unlock() - if ctx, ok := m.podContexts[pod.Status.PodIP]; ok { - ctx.cancel() - delete(m.podContexts, pod.Status.PodIP) - } + if ctx, ok := m.podContexts[pod.Status.PodIP]; ok { + ctx.cancel() + delete(m.podContexts, pod.Status.PodIP) } } @@ -384,17 +373,9 @@ func (m *Prober) processWorkItem() bool { item.url, item.podIP, item.podPort, m.workQueue.Len()) transport := http.DefaultTransport.(*http.Transport).Clone() - transport.TLSClientConfig = &tls.Config{ - //nolint:gosec - // We only want to know that the Gateway is configured, not that the configuration is valid. - // Therefore, we can safely ignore any TLS certificate validation. - InsecureSkipVerify: true, - } + transport.DialContext = func(ctx context.Context, network, addr string) (conn net.Conn, e error) { - // Requests with the IP as hostname and the Host header set do no pass client-side validation - // because the HTTP client validates that the hostname (not the Host header) matches the server - // TLS certificate Common Name or Alternative Names. Therefore, http.Request.URL is set to the - // hostname and it is substituted it here with the target IP. + // http.Request.URL is set to the hostname and it is substituted in here with the target IP. return dialContext(ctx, network, net.JoinHostPort(item.podIP, item.podPort)) } @@ -470,9 +451,6 @@ func (m *Prober) probeVerifier(item *workItem) prober.Verifier { zap.ByteString("body", b)) switch r.StatusCode { case http.StatusOK: - /** - {"my-kafka-channel":["90713ffd-f527-42bf-b158-57630b68ebe2","a2041ec2-3295-4cd8-ac31-e699ab08273e","d3d70a79-8528-4df6-a812-3b559380cf08","db536b74-45f8-41cd-ab3e-7e3f60ed9e35","eb3aeee9-7cb5-4cad-b4c4-424e436dac9f"]} - */ var subscriptions = make(map[string][]string) err := json.Unmarshal(b, &subscriptions) if err != nil { @@ -491,7 +469,6 @@ func (m *Prober) probeVerifier(item *workItem) prober.Verifier { item.targetStates.readyCount.Inc() return true, nil } else { - //TODO return and error if the channel doesn't exist? 
return false, nil } case http.StatusNotFound, http.StatusServiceUnavailable: diff --git a/pkg/channel/consolidated/status/status_test.go b/pkg/channel/consolidated/status/status_test.go index f750bd7b41..c926485a2e 100644 --- a/pkg/channel/consolidated/status/status_test.go +++ b/pkg/channel/consolidated/status/status_test.go @@ -183,6 +183,32 @@ func TestProbeSinglePod(t *testing.T) { } } +func TestProbeListerFail(t *testing.T) { + ch := channelTemplate.DeepCopy() + sub := subscriptionTemplate.DeepCopy() + + ready := make(chan *ReadyPair) + defer close(ready) + prober := NewProber( + zaptest.NewLogger(t).Sugar(), + notFoundLister{}, + func(c v1beta1.KafkaChannel, s eventingduckv1.SubscriberSpec) { + ready <- &ReadyPair{ + c, + s, + } + }) + + // If we can't list, this must fail and return false + ok, err := prober.IsReady(context.Background(), *ch, *sub) + if err == nil { + t.Fatal("IsReady returned unexpected success") + } + if ok { + t.Fatal("IsReady() returned true") + } +} + type fakeProbeTargetLister ProbeTarget func (l fakeProbeTargetLister) ListProbeTargets(ctx context.Context, kc messagingv1beta1.KafkaChannel) (*ProbeTarget, error) { @@ -192,6 +218,6 @@ func (l fakeProbeTargetLister) ListProbeTargets(ctx context.Context, kc messagin type notFoundLister struct{} -func (l notFoundLister) ListProbeTargets(ctx context.Context, obj interface{}) ([]ProbeTarget, error) { +func (l notFoundLister) ListProbeTargets(ctx context.Context, kc messagingv1beta1.KafkaChannel) (*ProbeTarget, error) { return nil, errors.New("not found") } From 6cc620ccf8673683157ac2c0e15b9ddc8b124f28 Mon Sep 17 00:00:00 2001 From: Ahmed Abdalla Date: Tue, 16 Mar 2021 23:43:10 +0100 Subject: [PATCH 12/17] Remove old comment Signed-off-by: Ahmed Abdalla --- .../consolidated/reconciler/controller/kafkachannel.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/pkg/channel/consolidated/reconciler/controller/kafkachannel.go b/pkg/channel/consolidated/reconciler/controller/kafkachannel.go index ff43bd6698..5f462ccea8 100644 --- a/pkg/channel/consolidated/reconciler/controller/kafkachannel.go +++ b/pkg/channel/consolidated/reconciler/controller/kafkachannel.go @@ -581,10 +581,7 @@ func (r *Reconciler) FinalizeKind(ctx context.Context, kc *v1beta1.KafkaChannel) return err } } - //TODO - //if r.consumerGroupWatcher != nil { - // r.consumerGroupWatcher.Forget(string(kc.ObjectMeta.UID)) - //} + for _, s := range kc.Spec.Subscribers { r.statusManager.CancelProbing(s) } From 90a6e2601b3b45e720689d0d6d965b77d93c861c Mon Sep 17 00:00:00 2001 From: Ahmed Abdalla Date: Tue, 16 Mar 2021 23:51:10 +0100 Subject: [PATCH 13/17] Fix fake status manager Signed-off-by: Ahmed Abdalla --- .../reconciler/controller/kafkachannel_test.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/pkg/channel/consolidated/reconciler/controller/kafkachannel_test.go b/pkg/channel/consolidated/reconciler/controller/kafkachannel_test.go index a67aa6f650..4d4454c1e3 100644 --- a/pkg/channel/consolidated/reconciler/controller/kafkachannel_test.go +++ b/pkg/channel/consolidated/reconciler/controller/kafkachannel_test.go @@ -860,6 +860,14 @@ func (m *fakeStatusManager) IsReady(ctx context.Context, ch v1beta1.KafkaChannel return m.FakeIsReady(ctx, ch, sub) } +func (m *fakeStatusManager) CancelProbing(sub eventingduckv1.SubscriberSpec) { + //do nothing +} + +func (m *fakeStatusManager) CancelPodProbing(pod corev1.Pod) { + //do nothing +} + func makePatch(namespace, name, patch string) clientgotesting.PatchActionImpl { return 
clientgotesting.PatchActionImpl{ ActionImpl: clientgotesting.ActionImpl{ From c7c423c76333e42d663b98a9814155cc402a29c4 Mon Sep 17 00:00:00 2001 From: Ahmed Abdalla Date: Wed, 17 Mar 2021 00:10:53 +0100 Subject: [PATCH 14/17] Return error if IsReady returns an error Signed-off-by: Ahmed Abdalla --- .../consolidated/reconciler/controller/kafkachannel.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/pkg/channel/consolidated/reconciler/controller/kafkachannel.go b/pkg/channel/consolidated/reconciler/controller/kafkachannel.go index 5f462ccea8..82f01bba7f 100644 --- a/pkg/channel/consolidated/reconciler/controller/kafkachannel.go +++ b/pkg/channel/consolidated/reconciler/controller/kafkachannel.go @@ -254,7 +254,11 @@ func (r *Reconciler) setupSubscriptionStatusWatcher(ctx context.Context, ch *v1b after.Status.Subscribers = make([]v1.SubscriberStatus, 0) for _, s := range ch.Spec.Subscribers { - if r, _ := r.statusManager.IsReady(ctx, *ch, s); r { + r, err := r.statusManager.IsReady(ctx, *ch, s) + if err != nil { + return fmt.Errorf("failed checking subscription readiness: %v", err) + } + if r { logging.FromContext(ctx).Debugw("marking subscription", zap.Any("subscription", s)) after.Status.Subscribers = append(after.Status.Subscribers, v1.SubscriberStatus{ UID: s.UID, From af33321db02b133a32728fae5ab95caa1a5fa3b9 Mon Sep 17 00:00:00 2001 From: Ahmed Abdalla Date: Wed, 17 Mar 2021 22:52:13 +0100 Subject: [PATCH 15/17] Change probing to be partition based and fix some corner cases of channel deletion --- .../dispatcher/consumer_message_handler.go | 4 +- .../consolidated/dispatcher/dispatcher.go | 43 ++++-- .../dispatcher/dispatcher_test.go | 138 +++++++++++++----- .../dispatcher/kafka_subscription.go | 22 ++- .../reconciler/controller/kafkachannel.go | 20 ++- .../reconciler/dispatcher/kafkachannel.go | 10 ++ pkg/channel/consolidated/status/status.go | 37 ++--- .../consolidated/status/status_test.go | 9 +- pkg/common/consumer/consumer_handler.go | 16 +- pkg/common/consumer/consumer_handler_test.go | 2 +- pkg/source/adapter/adapter.go | 2 +- 11 files changed, 203 insertions(+), 100 deletions(-) diff --git a/pkg/channel/consolidated/dispatcher/consumer_message_handler.go b/pkg/channel/consolidated/dispatcher/consumer_message_handler.go index 7bc8dcadce..5b01e6b916 100644 --- a/pkg/channel/consolidated/dispatcher/consumer_message_handler.go +++ b/pkg/channel/consolidated/dispatcher/consumer_message_handler.go @@ -43,8 +43,8 @@ func (c consumerMessageHandler) GetConsumerGroup() string { return c.consumerGroup } -func (c consumerMessageHandler) SetReady(ready bool) { - c.kafkaSubscription.SetReady(c.sub.UID, ready) +func (c consumerMessageHandler) SetReady(partition int32, ready bool) { + c.kafkaSubscription.SetReady(c.sub.UID, partition, ready) } func (c consumerMessageHandler) Handle(ctx context.Context, consumerMessage *sarama.ConsumerMessage) (bool, error) { diff --git a/pkg/channel/consolidated/dispatcher/dispatcher.go b/pkg/channel/consolidated/dispatcher/dispatcher.go index d72c0a61d9..46c4e4551c 100644 --- a/pkg/channel/consolidated/dispatcher/dispatcher.go +++ b/pkg/channel/consolidated/dispatcher/dispatcher.go @@ -25,11 +25,6 @@ import ( "sync" "sync/atomic" - "k8s.io/apimachinery/pkg/util/sets" - - "knative.dev/eventing-kafka/pkg/common/client" - "knative.dev/eventing-kafka/pkg/common/tracing" - "github.com/Shopify/sarama" protocolkafka "github.com/cloudevents/sdk-go/protocol/kafka_sarama/v2" "github.com/cloudevents/sdk-go/v2/binding" @@ -37,13 +32,17 @@ import ( 
"go.opencensus.io/trace" "go.uber.org/zap" "k8s.io/apimachinery/pkg/types" - eventingchannels "knative.dev/eventing/pkg/channel" - "knative.dev/eventing/pkg/kncloudevents" - "knative.dev/pkg/kmeta" + "k8s.io/apimachinery/pkg/util/sets" + "knative.dev/eventing-kafka/pkg/apis/messaging/v1beta1" "knative.dev/eventing-kafka/pkg/channel/consolidated/utils" "knative.dev/eventing-kafka/pkg/channel/distributed/common/env" + "knative.dev/eventing-kafka/pkg/common/client" "knative.dev/eventing-kafka/pkg/common/consumer" + "knative.dev/eventing-kafka/pkg/common/tracing" + eventingchannels "knative.dev/eventing/pkg/channel" + "knative.dev/eventing/pkg/kncloudevents" + "knative.dev/pkg/kmeta" ) const ( @@ -191,9 +190,11 @@ func (d *KafkaDispatcher) ServeHTTP(w nethttp.ResponseWriter, r *nethttp.Request } d.channelSubscriptions[channelRef].readySubscriptionsLock.RLock() defer d.channelSubscriptions[channelRef].readySubscriptionsLock.RUnlock() - var subscriptions = make(map[string][]string) + var subscriptions = make(map[string][]int32) w.Header().Set(dispatcherReadySubHeader, channelRefName) - subscriptions[channelRefNamespace+"/"+channelRefName] = d.channelSubscriptions[channelRef].channelReadySubscriptions.List() + for s, ps := range d.channelSubscriptions[channelRef].channelReadySubscriptions { + subscriptions[s] = ps.List() + } jsonResult, err := json.Marshal(subscriptions) if err != nil { d.logger.Errorf("Error marshalling json for sub-status channelref: %s/%s, %w", channelRefNamespace, channelRefName, err) @@ -236,7 +237,7 @@ func (d *KafkaDispatcher) UpdateKafkaConsumers(config *Config) (map[types.UID]er d.channelSubscriptions[channelRef] = &KafkaSubscription{ logger: d.logger, subs: []types.UID{}, - channelReadySubscriptions: sets.String{}, + channelReadySubscriptions: map[string]sets.Int32{}, } } @@ -369,6 +370,26 @@ func (d *KafkaDispatcher) getChannelReferenceFromHost(host string) (eventingchan return cr, nil } +func (d *KafkaDispatcher) CleanupChannel(ctx context.Context, kc *v1beta1.KafkaChannel) { + d.consumerUpdateLock.Lock() + defer d.consumerUpdateLock.Unlock() + channelRef := eventingchannels.ChannelReference{ + Name: kc.GetName(), + Namespace: kc.GetNamespace(), + } + d.logger.Infow("Cleaning up KafkaChannel cached resources", zap.Any("kafkachannel", channelRef)) + if kafkaSub, ok := d.channelSubscriptions[channelRef]; ok { + for _, s := range kafkaSub.subs { + if c, ok := d.subsConsumerGroups[s]; ok { + delete(d.subsConsumerGroups, s) + d.logger.Debugw("Closing cached consumer group", zap.Any("consumer group", c)) + c.Close() + } + } + delete(d.channelSubscriptions, channelRef) + } +} + func uidSetDifference(a, b []types.UID) (diff []types.UID) { m := make(map[types.UID]bool) diff --git a/pkg/channel/consolidated/dispatcher/dispatcher_test.go b/pkg/channel/consolidated/dispatcher/dispatcher_test.go index 129927f6cc..f968bba865 100644 --- a/pkg/channel/consolidated/dispatcher/dispatcher_test.go +++ b/pkg/channel/consolidated/dispatcher/dispatcher_test.go @@ -451,52 +451,109 @@ func TestSetReady(t *testing.T) { name string ready bool subID types.UID + partition int32 originalKafkaSub *KafkaSubscription desiredKafkaSub *KafkaSubscription }{ { - name: "doesn't have the sub, add it (on ready)", - ready: true, - subID: "foo", + name: "doesn't have the sub, add it (on ready)", + ready: true, + subID: "foo", + partition: 0, originalKafkaSub: &KafkaSubscription{ - channelReadySubscriptions: sets.String{"bar": sets.Empty{}}, + channelReadySubscriptions: map[string]sets.Int32{"bar": 
sets.NewInt32(0)}, }, desiredKafkaSub: &KafkaSubscription{ - subs: []types.UID{}, - channelReadySubscriptions: sets.String{"bar": sets.Empty{}, "foo": sets.Empty{}}, + subs: []types.UID{}, + channelReadySubscriptions: map[string]sets.Int32{ + "bar": sets.NewInt32(0), + "foo": sets.NewInt32(0), + }, }, }, { - name: "has the sub already (on ready)", - ready: true, - subID: "foo", + name: "has the sub but not the partition, add it (on ready)", + ready: true, + subID: "foo", + partition: 1, originalKafkaSub: &KafkaSubscription{ - channelReadySubscriptions: sets.String{"foo": sets.Empty{}, "bar": sets.Empty{}}, + channelReadySubscriptions: map[string]sets.Int32{ + "bar": sets.NewInt32(0), + "foo": sets.NewInt32(0), + }, }, desiredKafkaSub: &KafkaSubscription{ - channelReadySubscriptions: sets.String{"foo": sets.Empty{}, "bar": sets.Empty{}}, + subs: []types.UID{}, + channelReadySubscriptions: map[string]sets.Int32{ + "bar": sets.NewInt32(0), + "foo": sets.NewInt32(0, 1), + }, }, }, { - name: "has the sub, delete it (on !ready)", - ready: false, - subID: "foo", + name: "has the sub and partition already (on ready)", + ready: true, + subID: "foo", + partition: 0, originalKafkaSub: &KafkaSubscription{ - channelReadySubscriptions: sets.String{"foo": sets.Empty{}, "bar": sets.Empty{}}, + channelReadySubscriptions: map[string]sets.Int32{ + "bar": sets.NewInt32(0), + "foo": sets.NewInt32(0), + }}, + desiredKafkaSub: &KafkaSubscription{ + channelReadySubscriptions: map[string]sets.Int32{ + "bar": sets.NewInt32(0), + "foo": sets.NewInt32(0), + }}, + }, + { + name: "has the sub with two partition, delete one (on !ready)", + ready: false, + subID: "foo", + partition: 1, + originalKafkaSub: &KafkaSubscription{ + channelReadySubscriptions: map[string]sets.Int32{ + "bar": sets.NewInt32(0), + "foo": sets.NewInt32(0, 1), + }, }, desiredKafkaSub: &KafkaSubscription{ - channelReadySubscriptions: sets.String{"bar": sets.Empty{}}, + channelReadySubscriptions: map[string]sets.Int32{ + "bar": sets.NewInt32(0), + "foo": sets.NewInt32(0), + }, }, }, { - name: "doesn't have the sub to delete (on !ready)", - ready: false, - subID: "foo", + name: "has the sub with one partition, delete sub (on !ready)", + ready: false, + subID: "foo", + partition: 0, originalKafkaSub: &KafkaSubscription{ - channelReadySubscriptions: sets.String{"bar": sets.Empty{}}, + channelReadySubscriptions: map[string]sets.Int32{ + "bar": sets.NewInt32(0), + "foo": sets.NewInt32(0), + }, }, desiredKafkaSub: &KafkaSubscription{ - channelReadySubscriptions: sets.String{"bar": sets.Empty{}}, + channelReadySubscriptions: map[string]sets.Int32{ + "bar": sets.NewInt32(0), + }, + }, + }, + { + name: "doesn't have the sub to delete (on !ready)", + ready: false, + subID: "foo", + partition: 0, + originalKafkaSub: &KafkaSubscription{ + channelReadySubscriptions: map[string]sets.Int32{ + "bar": sets.NewInt32(0), + }}, + desiredKafkaSub: &KafkaSubscription{ + channelReadySubscriptions: map[string]sets.Int32{ + "bar": sets.NewInt32(0), + }, }, }, } @@ -504,7 +561,7 @@ func TestSetReady(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Logf("Running %s", t.Name()) tc.originalKafkaSub.logger = logger - tc.originalKafkaSub.SetReady(tc.subID, tc.ready) + tc.originalKafkaSub.SetReady(tc.subID, tc.partition, tc.ready) if diff := cmp.Diff(tc.desiredKafkaSub.channelReadySubscriptions, tc.originalKafkaSub.channelReadySubscriptions); diff != "" { t.Errorf("unexpected ChannelReadySubscription (-want, +got) = %v", diff) } @@ -540,11 +597,11 @@ func TestServeHTTP(t 
*testing.T) { name: "no ready subscribers", httpMethod: httpGet, responseReturnCode: http.StatusOK, - desiredJson: []byte(`{"bar/foo":[]}`), + desiredJson: []byte(`{}`), channelSubs: map[eventingchannels.ChannelReference]*KafkaSubscription{ {Name: "foo", Namespace: "bar"}: { subs: []types.UID{}, - channelReadySubscriptions: sets.String{}, + channelReadySubscriptions: map[string]sets.Int32{}, }, }, requestURI: "/bar/foo", @@ -555,36 +612,47 @@ func TestServeHTTP(t *testing.T) { responseReturnCode: http.StatusNotFound, channelSubs: map[eventingchannels.ChannelReference]*KafkaSubscription{ {Name: "foo", Namespace: "baz"}: { - subs: []types.UID{"a", "b"}, - channelReadySubscriptions: sets.String{"a": sets.Empty{}, "b": sets.Empty{}}, + subs: []types.UID{"a", "b"}, + channelReadySubscriptions: map[string]sets.Int32{ + "a": sets.NewInt32(0), + "b": sets.NewInt32(0), + }, }, }, requestURI: "/bar/foo", }, { name: "return correct subscription", httpMethod: httpGet, - desiredJson: []byte(`{"bar/foo":["a","b"]}`), + desiredJson: []byte(`{"a":[0],"b":[0,2,5]}`), responseReturnCode: http.StatusOK, channelSubs: map[eventingchannels.ChannelReference]*KafkaSubscription{ {Name: "foo", Namespace: "bar"}: { - subs: []types.UID{"a", "b"}, - channelReadySubscriptions: sets.String{"a": sets.Empty{}, "b": sets.Empty{}}, + subs: []types.UID{"a", "b"}, + channelReadySubscriptions: map[string]sets.Int32{ + "a": sets.NewInt32(0), + "b": sets.NewInt32(0, 2, 5), + }, }, }, requestURI: "/bar/foo", }, { name: "return correct subscription from multiple chanrefs", httpMethod: httpGet, - desiredJson: []byte(`{"bar/foo":["a","b"]}`), + desiredJson: []byte(`{"a":[0],"b":[0,2,5]}`), responseReturnCode: http.StatusOK, channelSubs: map[eventingchannels.ChannelReference]*KafkaSubscription{ {Name: "table", Namespace: "flip"}: { - subs: []types.UID{"c", "d"}, - channelReadySubscriptions: sets.String{"c": sets.Empty{}}, - }, + subs: []types.UID{"c", "d"}, + channelReadySubscriptions: map[string]sets.Int32{ + "c": sets.NewInt32(0), + "d": sets.NewInt32(0), + }}, {Name: "foo", Namespace: "bar"}: { - subs: []types.UID{"a", "b"}, - channelReadySubscriptions: sets.String{"a": sets.Empty{}, "b": sets.Empty{}}, + subs: []types.UID{"a", "b"}, + channelReadySubscriptions: map[string]sets.Int32{ + "a": sets.NewInt32(0), + "b": sets.NewInt32(0, 2, 5), + }, }, }, requestURI: "/bar/foo", diff --git a/pkg/channel/consolidated/dispatcher/kafka_subscription.go b/pkg/channel/consolidated/dispatcher/kafka_subscription.go index 34a4977f7f..8f54c4a77c 100644 --- a/pkg/channel/consolidated/dispatcher/kafka_subscription.go +++ b/pkg/channel/consolidated/dispatcher/kafka_subscription.go @@ -30,23 +30,29 @@ type KafkaSubscription struct { subs []types.UID // readySubscriptionsLock must be used to synchronize access to channelReadySubscriptions readySubscriptionsLock sync.RWMutex - channelReadySubscriptions sets.String + channelReadySubscriptions map[string]sets.Int32 } // SetReady will mark the subid in the KafkaSubscription and call any registered callbacks -func (ks *KafkaSubscription) SetReady(subID types.UID, ready bool) { +func (ks *KafkaSubscription) SetReady(subID types.UID, partition int32, ready bool) { ks.logger.Debugw("Setting subscription readiness", zap.Any("subscription", subID), zap.Bool("ready", ready)) ks.readySubscriptionsLock.Lock() defer ks.readySubscriptionsLock.Unlock() if ready { - if !ks.channelReadySubscriptions.Has(string(subID)) { - ks.logger.Debugw("Caching ready subscription", zap.Any("subscription", subID)) - 
ks.channelReadySubscriptions.Insert(string(subID)) + if subs, ok := ks.channelReadySubscriptions[string(subID)]; ok { + ks.logger.Debugw("Adding ready ready partition to cached subscription", zap.Any("subscription", subID), zap.Int32("partition", partition)) + subs.Insert(partition) + } else { + ks.logger.Debugw("Caching ready subscription", zap.Any("subscription", subID), zap.Int32("partition", partition)) + ks.channelReadySubscriptions[string(subID)] = sets.NewInt32(partition) } } else { - if ks.channelReadySubscriptions.Has(string(subID)) { - ks.logger.Debugw("Ejecting cached ready subscription", zap.Any("subscription", subID)) - ks.channelReadySubscriptions.Delete(string(subID)) + if subs, ok := ks.channelReadySubscriptions[string(subID)]; ok { + ks.logger.Debugw("Ejecting cached ready subscription", zap.Any("subscription", subID), zap.Int32("partition", partition)) + subs.Delete(partition) + if subs.Len() == 0 { + delete(ks.channelReadySubscriptions, string(subID)) + } } } } diff --git a/pkg/channel/consolidated/reconciler/controller/kafkachannel.go b/pkg/channel/consolidated/reconciler/controller/kafkachannel.go index 82f01bba7f..8a2fe567f7 100644 --- a/pkg/channel/consolidated/reconciler/controller/kafkachannel.go +++ b/pkg/channel/consolidated/reconciler/controller/kafkachannel.go @@ -254,11 +254,7 @@ func (r *Reconciler) setupSubscriptionStatusWatcher(ctx context.Context, ch *v1b after.Status.Subscribers = make([]v1.SubscriberStatus, 0) for _, s := range ch.Spec.Subscribers { - r, err := r.statusManager.IsReady(ctx, *ch, s) - if err != nil { - return fmt.Errorf("failed checking subscription readiness: %v", err) - } - if r { + if r, _ := r.statusManager.IsReady(ctx, *ch, s); r { logging.FromContext(ctx).Debugw("marking subscription", zap.Any("subscription", s)) after.Status.Subscribers = append(after.Status.Subscribers, v1.SubscriberStatus{ UID: s.UID, @@ -545,6 +541,7 @@ func (r *Reconciler) deleteTopic(ctx context.Context, channel *v1beta1.KafkaChan logger.Infow("Deleting topic on Kafka Cluster", zap.String("topic", topicName)) err := kafkaClusterAdmin.DeleteTopic(topicName) if err == sarama.ErrUnknownTopicOrPartition { + logger.Debugw("Received an unknown topic or partition response. Ignoring") return nil } else if err != nil { logger.Errorw("Error deleting topic", zap.String("topic", topicName), zap.Error(err)) @@ -580,15 +577,22 @@ func (r *Reconciler) updateKafkaConfig(ctx context.Context, configMap *corev1.Co func (r *Reconciler) FinalizeKind(ctx context.Context, kc *v1beta1.KafkaChannel) pkgreconciler.Event { // Do not attempt retrying creating the client because it might be a permanent error // in which case the finalizer will never get removed. 
-	if kafkaClusterAdmin, err := r.createClient(ctx); err == nil && r.kafkaConfig != nil {
+	logger := logging.FromContext(ctx)
+	channel := fmt.Sprintf("%s/%s", kc.GetNamespace(), kc.GetName())
+	logger.Debugw("FinalizeKind", zap.String("channel", channel))
+	kafkaClusterAdmin, err := r.createClient(ctx)
+	if err != nil || r.kafkaConfig == nil {
+		logger.Errorw("Can't obtain Kafka Client", zap.String("channel", channel), zap.Error(err))
+	} else {
+		logger.Debugw("Got client, about to delete topic")
 		if err := r.deleteTopic(ctx, kc, kafkaClusterAdmin); err != nil {
+			logger.Errorw("Error deleting Kafka channel topic", zap.String("channel", channel), zap.Error(err))
 			return err
 		}
 	}
 	for _, s := range kc.Spec.Subscribers {
+		logger.Debugw("Canceling probing", zap.String("channel", channel), zap.Any("subscription", s))
 		r.statusManager.CancelProbing(s)
 	}
-
 	return newReconciledNormal(kc.Namespace, kc.Name) //ok to remove finalizer
 }
diff --git a/pkg/channel/consolidated/reconciler/dispatcher/kafkachannel.go b/pkg/channel/consolidated/reconciler/dispatcher/kafkachannel.go
index e55d10d560..03b7b03dad 100644
--- a/pkg/channel/consolidated/reconciler/dispatcher/kafkachannel.go
+++ b/pkg/channel/consolidated/reconciler/dispatcher/kafkachannel.go
@@ -157,6 +157,16 @@ func (r *Reconciler) ObserveKind(ctx context.Context, kc *v1beta1.KafkaChannel)
 	return r.syncDispatcher(ctx)
 }
 
+func (r *Reconciler) FinalizeKind(ctx context.Context, kc *v1beta1.KafkaChannel) pkgreconciler.Event {
+	r.kafkaDispatcher.CleanupChannel(ctx, kc)
+	return nil
+}
+
+func (r *Reconciler) ObserveFinalizeKind(ctx context.Context, kc *v1beta1.KafkaChannel) pkgreconciler.Event {
+	r.kafkaDispatcher.CleanupChannel(ctx, kc)
+	return nil
+}
+
 func (r *Reconciler) syncDispatcher(ctx context.Context) pkgreconciler.Event {
 	channels, err := r.kafkachannelLister.List(labels.Everything())
 	if err != nil {
diff --git a/pkg/channel/consolidated/status/status.go b/pkg/channel/consolidated/status/status.go
index bb1d53e715..850c4c9b42 100644
--- a/pkg/channel/consolidated/status/status.go
+++ b/pkg/channel/consolidated/status/status.go
@@ -39,7 +39,6 @@ import (
 	messagingv1beta1 "knative.dev/eventing-kafka/pkg/apis/messaging/v1beta1"
 	eventingduckv1 "knative.dev/eventing/pkg/apis/duck/v1"
 	"knative.dev/networking/pkg/prober"
-	"knative.dev/pkg/kmeta"
 	"knative.dev/pkg/logging"
 )
 
@@ -60,12 +59,13 @@ type targetState struct {
 	sub eventingduckv1.SubscriberSpec
 	ch  messagingv1beta1.KafkaChannel
 
+	readyLock sync.RWMutex
 	// pendingCount is the number of pods that haven't been successfully probed yet
 	pendingCount atomic.Int32
 	// readyCount is the number of pods that have the subscription ready
-	readyCount   atomic.Int32
-	initialCount int
-	lastAccessed time.Time
+	readyPartitions sets.Int
+	initialCount    int
+	lastAccessed    time.Time
 
 	cancel func()
 }
@@ -174,13 +174,9 @@ func (m *Prober) checkReadiness(state *targetState) bool {
 	m.logger.Debugw("Checking subscription readiness",
 		zap.Any("initial probed consumers", consumers),
 		zap.Any("channel partitions", partitions),
-		zap.Any("ready consumers", state.readyCount.Load()),
+		zap.Any("ready partitions", state.readyPartitions.List()),
 	)
-	if consumers > partitions {
-		return state.readyCount.Load() == partitions
-	} else {
-		return state.readyCount.Load() == consumers
-	}
+	return state.readyPartitions.Len() == int(partitions)
 }
 
 func (m *Prober) IsReady(ctx context.Context, ch messagingv1beta1.KafkaChannel, sub eventingduckv1.SubscriberSpec) (bool, error) {
@@ -236,7 +232,7 @@ func (m *Prober) IsReady(ctx context.Context, ch messagingv1beta1.KafkaChannel,
 	subscriptionState.initialCount = target.PodIPs.Len()
 	subscriptionState.pendingCount.Store(int32(len(workItems)))
-	subscriptionState.readyCount.Store(0)
+	subscriptionState.readyPartitions = sets.Int{}
 
 	for ip, ipWorkItems := range workItems {
 		// Get or create the context for that IP
@@ -328,17 +324,12 @@ func (m *Prober) Start(done <-chan struct{}) chan struct{} {
 
 // CancelProbing cancels probing of the provided Subscription
 func (m *Prober) CancelProbing(sub eventingduckv1.SubscriberSpec) {
-	acc, err := kmeta.DeletionHandlingAccessor(sub)
-	if err != nil {
-		return
-	}
-
-	key := acc.GetUID()
 	m.mu.Lock()
 	defer m.mu.Unlock()
-	if state, ok := m.targetStates[key]; ok {
+	if state, ok := m.targetStates[sub.UID]; ok {
+		m.logger.Debugw("Canceling state", zap.Any("subscription", sub))
 		state.cancel()
-		delete(m.targetStates, key)
+		delete(m.targetStates, sub.UID)
 	}
 }
@@ -451,7 +442,7 @@ func (m *Prober) probeVerifier(item *workItem) prober.Verifier {
 			zap.ByteString("body", b))
 		switch r.StatusCode {
 		case http.StatusOK:
-			var subscriptions = make(map[string][]string)
+			var subscriptions = make(map[string][]int)
 			err := json.Unmarshal(b, &subscriptions)
 			if err != nil {
 				m.logger.Errorw("error unmarshaling", err)
@@ -465,8 +456,10 @@ func (m *Prober) probeVerifier(item *workItem) prober.Verifier {
 				zap.String("want channel", key),
 				zap.String("want subscription", uid),
 			)
-			if subs, ok := subscriptions[key]; ok && sets.NewString(subs...).Has(uid) {
-				item.targetStates.readyCount.Inc()
+			if partitions, ok := subscriptions[uid]; ok {
+				item.targetStates.readyLock.Lock()
+				defer item.targetStates.readyLock.Unlock()
+				item.targetStates.readyPartitions.Insert(partitions...)
 				return true, nil
 			} else {
 				return false, nil
diff --git a/pkg/channel/consolidated/status/status_test.go b/pkg/channel/consolidated/status/status_test.go
index c926485a2e..c9d0efe60a 100644
--- a/pkg/channel/consolidated/status/status_test.go
+++ b/pkg/channel/consolidated/status/status_test.go
@@ -20,7 +20,6 @@ import (
 	"context"
 	"encoding/json"
 	"errors"
-	"fmt"
 	"net/http"
 	"net/http/httptest"
 	"net/url"
@@ -69,12 +68,8 @@ func handleProbe(t *testing.T) func(http.ResponseWriter, *http.Request) {
 	return func(w http.ResponseWriter, r *http.Request) {
 		channelRefName := channelTemplate.ObjectMeta.Name
 		channelRefNamespace := channelTemplate.ObjectMeta.Namespace
-		mapKeyName := fmt.Sprintf("%s/%s", channelRefNamespace,
-			channelRefName)
-		var subscriptions = map[string][]string{
-			mapKeyName: {
-				string(subscriptionTemplate.UID),
-			},
+		var subscriptions = map[string][]int{
+			string(subscriptionTemplate.UID): {0},
 		}
 		w.Header().Set(dispatcherReadySubHeader, channelRefName)
 		jsonResult, err := json.Marshal(subscriptions)
diff --git a/pkg/common/consumer/consumer_handler.go b/pkg/common/consumer/consumer_handler.go
index 18492ec93d..681262877d 100644
--- a/pkg/common/consumer/consumer_handler.go
+++ b/pkg/common/consumer/consumer_handler.go
@@ -28,7 +28,7 @@ type KafkaConsumerHandler interface {
 	// When this function returns true, the consumer group offset is marked as consumed.
 	// The returned error is enqueued in errors channel.
 	Handle(context context.Context, message *sarama.ConsumerMessage) (bool, error)
-	SetReady(ready bool)
+	SetReady(partition int32, ready bool)
 	GetConsumerGroup() string
 }
 
@@ -60,15 +60,21 @@ func (consumer *SaramaConsumerHandler) Setup(sarama.ConsumerGroupSession) error
 
 // Cleanup is run at the end of a session, once all ConsumeClaim goroutines have exited
 func (consumer *SaramaConsumerHandler) Cleanup(session sarama.ConsumerGroupSession) error {
-	consumer.logger.Info("cleanup handler")
-	consumer.handler.SetReady(false)
+	consumer.logger.Infow("Cleanup handler")
+	for t, ps := range session.Claims() {
+		for _, p := range ps {
+			consumer.logger.Debugw("Cleanup handler: Setting partition readiness to false", zap.String("topic", t),
+				zap.Int32("partition", p))
+			consumer.handler.SetReady(p, false)
+		}
+	}
 	return nil
 }
 
 // ConsumeClaim must start a consumer loop of ConsumerGroupClaim's Messages().
 func (consumer *SaramaConsumerHandler) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
 	consumer.logger.Infow(fmt.Sprintf("Starting partition consumer, topic: %s, partition: %d, initialOffset: %d", claim.Topic(), claim.Partition(), claim.InitialOffset()), zap.String("ConsumeGroup", consumer.handler.GetConsumerGroup()))
-	consumer.handler.SetReady(true)
+	consumer.handler.SetReady(claim.Partition(), true)
 	// NOTE:
 	// Do not move the code below to a goroutine.
 	// The `ConsumeClaim` itself is called within a goroutine, see:
@@ -86,7 +92,7 @@
 			if err != nil {
 				consumer.logger.Infow("Failure while handling a message", zap.String("topic", message.Topic), zap.Int32("partition", message.Partition), zap.Int64("offset", message.Offset), zap.Error(err))
 				consumer.errors <- err
-				consumer.handler.SetReady(false)
+				consumer.handler.SetReady(claim.Partition(), false)
 			}
 
 			if mustMark {
diff --git a/pkg/common/consumer/consumer_handler_test.go b/pkg/common/consumer/consumer_handler_test.go
index 4d8fdac959..2835872580 100644
--- a/pkg/common/consumer/consumer_handler_test.go
+++ b/pkg/common/consumer/consumer_handler_test.go
@@ -114,7 +114,7 @@ func (m mockMessageHandler) Handle(ctx context.Context, message *sarama.Consumer
 	}
 }
 
-func (m mockMessageHandler) SetReady(ready bool) {
+func (m mockMessageHandler) SetReady(int32, bool) {
 }
 
 func (m mockMessageHandler) GetConsumerGroup() string {
diff --git a/pkg/source/adapter/adapter.go b/pkg/source/adapter/adapter.go
index 6c4c4b0ac5..6c7dd48c58 100644
--- a/pkg/source/adapter/adapter.go
+++ b/pkg/source/adapter/adapter.go
@@ -125,7 +125,7 @@ func (a *Adapter) Start(ctx context.Context) error {
 	return nil
 }
 
-func (a *Adapter) SetReady(_ bool) {}
+func (a *Adapter) SetReady(int32, bool) {}
 
 func (a *Adapter) Handle(ctx context.Context, msg *sarama.ConsumerMessage) (bool, error) {
 	if a.rateLimiter != nil {

From 5654168ca64a84b74ec5f50c12db3ed11125b8ca Mon Sep 17 00:00:00 2001
From: Ahmed Abdalla
Date: Fri, 19 Mar 2021 13:12:25 +0100
Subject: [PATCH 16/17] Change cleanup logic to clean ready subscriptions only

Signed-off-by: Ahmed Abdalla
---
 pkg/channel/consolidated/dispatcher/dispatcher.go | 11 +++--------
 pkg/channel/consolidated/status/status.go         | 11 +----------
 2 files changed, 4 insertions(+), 18 deletions(-)

diff --git a/pkg/channel/consolidated/dispatcher/dispatcher.go b/pkg/channel/consolidated/dispatcher/dispatcher.go
index 46c4e4551c..ff1da9d90c 100644
--- a/pkg/channel/consolidated/dispatcher/dispatcher.go
+++ b/pkg/channel/consolidated/dispatcher/dispatcher.go
@@ -379,14 +379,9 @@ func (d *KafkaDispatcher) CleanupChannel(ctx context.Context, kc *v1beta1.KafkaC
 	}
 	d.logger.Infow("Cleaning up KafkaChannel cached resources", zap.Any("kafkachannel", channelRef))
 	if kafkaSub, ok := d.channelSubscriptions[channelRef]; ok {
-		for _, s := range kafkaSub.subs {
-			if c, ok := d.subsConsumerGroups[s]; ok {
-				delete(d.subsConsumerGroups, s)
-				d.logger.Debugw("Closing cached consumer group", zap.Any("consumer group", c))
-				c.Close()
-			}
-		}
-		delete(d.channelSubscriptions, channelRef)
+		kafkaSub.readySubscriptionsLock.Lock()
+		defer kafkaSub.readySubscriptionsLock.Unlock()
+		kafkaSub.channelReadySubscriptions = map[string]sets.Int32{}
 	}
 }
 
diff --git a/pkg/channel/consolidated/status/status.go b/pkg/channel/consolidated/status/status.go
index 850c4c9b42..092d2c696d 100644
--- a/pkg/channel/consolidated/status/status.go
+++ b/pkg/channel/consolidated/status/status.go
@@ -18,7 +18,6 @@ package status
 
 import (
 	"context"
-	"crypto/sha256"
 	"encoding/json"
 	"fmt"
 	"net"
@@ -160,14 +159,6 @@ func NewProber(
 	}
 }
 
-func computeHash(sub eventingduckv1.SubscriberSpec) ([sha256.Size]byte, error) {
-	bytes, err := json.Marshal(sub)
-	if err != nil {
-		return [sha256.Size]byte{}, fmt.Errorf("failed to serialize Subscription: %w", err)
-	}
-	return sha256.Sum256(bytes), nil
-}
-
 func (m *Prober) checkReadiness(state *targetState) bool {
 	consumers := int32(state.initialCount)
 	partitions := state.ch.Spec.NumPartitions
@@ -395,7 +386,7 @@ func (m *Prober) processWorkItem() bool {
 	if err != nil || !ok {
 		// In case of error, enqueue for retry
 		m.workQueue.AddRateLimited(obj)
-		item.logger.Errorf("Probing of %s failed, IP: %s:%s, ready: %t, error: %v (depth: %d)",
+		item.logger.Debugf("Probing of %s failed, IP: %s:%s, ready: %t, error: %v (depth: %d)",
 			item.url, item.podIP, item.podPort, ok, err, m.workQueue.Len())
 	} else {
 		m.onProbingSuccess(item.targetStates, item.podState)

From 1836b974d87d3d893abd2387769d85801143e14e Mon Sep 17 00:00:00 2001
From: Ahmed Abdalla
Date: Fri, 19 Mar 2021 13:33:32 +0100
Subject: [PATCH 17/17] Remove cleanup to avoid consumers race

Signed-off-by: Ahmed Abdalla
---
 .../consolidated/dispatcher/dispatcher.go     | 16 ----------------
 .../reconciler/dispatcher/kafkachannel.go     | 10 ----------
 pkg/channel/consolidated/status/status.go     |  2 +-
 3 files changed, 1 insertion(+), 27 deletions(-)

diff --git a/pkg/channel/consolidated/dispatcher/dispatcher.go b/pkg/channel/consolidated/dispatcher/dispatcher.go
index ff1da9d90c..2a83999c17 100644
--- a/pkg/channel/consolidated/dispatcher/dispatcher.go
+++ b/pkg/channel/consolidated/dispatcher/dispatcher.go
@@ -34,7 +34,6 @@ import (
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/sets"
 
-	"knative.dev/eventing-kafka/pkg/apis/messaging/v1beta1"
 	"knative.dev/eventing-kafka/pkg/channel/consolidated/utils"
 	"knative.dev/eventing-kafka/pkg/channel/distributed/common/env"
 	"knative.dev/eventing-kafka/pkg/common/client"
@@ -370,21 +369,6 @@ func (d *KafkaDispatcher) getChannelReferenceFromHost(host string) (eventingchan
 	return cr, nil
 }
 
-func (d *KafkaDispatcher) CleanupChannel(ctx context.Context, kc *v1beta1.KafkaChannel) {
-	d.consumerUpdateLock.Lock()
-	defer d.consumerUpdateLock.Unlock()
-	channelRef := eventingchannels.ChannelReference{
-		Name:      kc.GetName(),
-		Namespace: kc.GetNamespace(),
-	}
-	d.logger.Infow("Cleaning up KafkaChannel cached resources", zap.Any("kafkachannel", channelRef))
-	if kafkaSub, ok := d.channelSubscriptions[channelRef]; ok {
-		kafkaSub.readySubscriptionsLock.Lock()
-		defer kafkaSub.readySubscriptionsLock.Unlock()
-		kafkaSub.channelReadySubscriptions = map[string]sets.Int32{}
-	}
-}
-
 func uidSetDifference(a, b []types.UID) (diff []types.UID) {
 	m := make(map[types.UID]bool)
 
diff --git a/pkg/channel/consolidated/reconciler/dispatcher/kafkachannel.go b/pkg/channel/consolidated/reconciler/dispatcher/kafkachannel.go
index 03b7b03dad..e55d10d560 100644
--- a/pkg/channel/consolidated/reconciler/dispatcher/kafkachannel.go
+++ b/pkg/channel/consolidated/reconciler/dispatcher/kafkachannel.go
@@ -157,16 +157,6 @@ func (r *Reconciler) ObserveKind(ctx context.Context, kc *v1beta1.KafkaChannel)
 	return r.syncDispatcher(ctx)
 }
 
-func (r *Reconciler) FinalizeKind(ctx context.Context, kc *v1beta1.KafkaChannel) pkgreconciler.Event {
-	r.kafkaDispatcher.CleanupChannel(ctx, kc)
-	return nil
-}
-
-func (r *Reconciler) ObserveFinalizeKind(ctx context.Context, kc *v1beta1.KafkaChannel) pkgreconciler.Event {
-	r.kafkaDispatcher.CleanupChannel(ctx, kc)
-	return nil
-}
-
 func (r *Reconciler) syncDispatcher(ctx context.Context) pkgreconciler.Event {
 	channels, err := r.kafkachannelLister.List(labels.Everything())
 	if err != nil {
diff --git a/pkg/channel/consolidated/status/status.go b/pkg/channel/consolidated/status/status.go
index 092d2c696d..b42f7bf0f1 100644
--- a/pkg/channel/consolidated/status/status.go
+++ b/pkg/channel/consolidated/status/status.go
@@ -43,7 +43,7 @@ import (
 const (
 	// probeConcurrency defines how many probing calls can be issued simultaneously
-	probeConcurrency = 15
+	probeConcurrency = 100
 	// probeTimeout defines the maximum amount of time a request will wait
 	probeTimeout = 1 * time.Second
 	// initialDelay defines the delay before enqueuing a probing request the first time.
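
The common thread in patches 14 through 17 is that readiness is now tracked per (subscription UID, partition) pair rather than per subscription, and a subscription only reports ready once every partition of the channel has a live consumer claim. The standalone Go sketch below illustrates that invariant; it is illustrative only, under stated assumptions: the readyPartitions type and its methods are invented for this note, while the real code keeps the same state in KafkaSubscription.channelReadySubscriptions, a map[string]sets.Int32 guarded by readySubscriptionsLock.

package main

import "fmt"

// readyPartitions maps a subscription UID to the set of partitions that
// currently have an active consumer claim (sets.Int32 in the real code).
type readyPartitions map[string]map[int32]bool

// SetReady records or ejects a single (subscription, partition) pair,
// dropping the subscription entirely once its last ready partition is gone,
// mirroring KafkaSubscription.SetReady above.
func (r readyPartitions) SetReady(subID string, partition int32, ready bool) {
	if ready {
		if r[subID] == nil {
			r[subID] = map[int32]bool{}
		}
		r[subID][partition] = true
		return
	}
	delete(r[subID], partition)
	if len(r[subID]) == 0 {
		delete(r, subID)
	}
}

// IsReady mirrors Prober.checkReadiness: the subscription counts as ready
// only when every one of the channel's partitions has reported in.
func (r readyPartitions) IsReady(subID string, numPartitions int) bool {
	return len(r[subID]) == numPartitions
}

func main() {
	r := readyPartitions{}
	r.SetReady("sub-a", 0, true)
	fmt.Println(r.IsReady("sub-a", 2)) // false: partition 1 has no claim yet
	r.SetReady("sub-a", 1, true)
	fmt.Println(r.IsReady("sub-a", 2)) // true: all partitions claimed
	r.SetReady("sub-a", 1, false)      // e.g. a rebalance revokes partition 1
	fmt.Println(r.IsReady("sub-a", 2)) // false again until probing re-observes it
}

This is also why patch 16 stops closing consumer groups during cleanup and only empties the ready map: the consumer groups keep running across a probe cycle, and the map is rebuilt as partitions are re-claimed, which patch 17 then narrows further by dropping the dispatcher-side finalizer entirely.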