This repository has been archived by the owner on Jun 19, 2022. It is now read-only.

PullSubscription and Storage importer metrics #250

Merged Sep 12, 2019 (31 commits). Changes shown below are from 23 of the 31 commits.

Commits
8d57620 - in progress (nachocano, Sep 5, 2019)
af2b584 - updates (nachocano, Sep 6, 2019)
ff99f22 - updates for new pkg and cloudevents sdk (nachocano, Sep 6, 2019)
5213ff6 - rollback the component name (nachocano, Sep 6, 2019)
2064d5d - adding pkg (nachocano, Sep 6, 2019)
1becaf0 - adding env variables to the receive adapter (nachocano, Sep 6, 2019)
a005a5b - fixing webhook (nachocano, Sep 6, 2019)
13d6015 - main metrics (nachocano, Sep 9, 2019)
c21f179 - updating pkg (nachocano, Sep 9, 2019)
44a3312 - cosmetics (nachocano, Sep 9, 2019)
a6e92e1 - cosmetics (nachocano, Sep 9, 2019)
35269e9 - passing the resource group underneath (nachocano, Sep 9, 2019)
fea7a18 - removing wrong stuff (nachocano, Sep 9, 2019)
820e77a - updates (nachocano, Sep 9, 2019)
3210c44 - changed to gcpimporter (nachocano, Sep 9, 2019)
2720c5a - rollback changes (nachocano, Sep 9, 2019)
7cc1193 - updates (nachocano, Sep 9, 2019)
4c0af28 - updates (nachocano, Sep 9, 2019)
f839808 - updates (nachocano, Sep 9, 2019)
e7c7b45 - updates after code review (nachocano, Sep 10, 2019)
88fd7a7 - Merge branch 'storage_metrics' of github.com:nachocano/knative-gcp in… (nachocano, Sep 10, 2019)
9a1d3c2 - updates after code review (nachocano, Sep 10, 2019)
02bfad1 - updates (nachocano, Sep 10, 2019)
f72847c - Merge remote-tracking branch 'upstream/master' into storage_metrics (nachocano, Sep 10, 2019)
2e7c5d6 - updates (nachocano, Sep 11, 2019)
55d9b48 - cosmetics (nachocano, Sep 11, 2019)
73bcae5 - do not change this (nachocano, Sep 11, 2019)
fa44e32 - updating pkg (nachocano, Sep 11, 2019)
b08b329 - updating to 0.9.1 (nachocano, Sep 11, 2019)
f11712c - Merge remote-tracking branch 'upstream/master' into storage_metrics (nachocano, Sep 12, 2019)
b15a2fa - rollback metrics domain change to avoid problems (nachocano, Sep 12, 2019)
24 changes: 12 additions & 12 deletions Gopkg.lock (generated file; diff not rendered)

6 changes: 3 additions & 3 deletions Gopkg.toml
@@ -21,7 +21,8 @@ required = [

[[override]]
name = "knative.dev/pkg"
branch = "master"
source = "github.com/nachocano/pkg"
branch = "source_stats"

# This is a preemptive override.
[[override]]
@@ -62,8 +63,7 @@ required = [

[[override]]
name = "github.com/cloudevents/sdk-go"
# HEAD as of 2019-06-04
revision = "6fe382645effbc5d4a7055f4280e9c3d7f58a2b6"
version = "0.9.0"

[prune]
go-tests = true
50 changes: 22 additions & 28 deletions cmd/pubsub/receive_adapter/main.go
@@ -17,25 +17,16 @@ limitations under the License.
package main

import (
"cloud.google.com/go/compute/metadata"
"flag"
"fmt"
"log"
"time"

"cloud.google.com/go/compute/metadata"
"github.com/cloudevents/sdk-go/pkg/cloudevents/client"
"github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec"
"github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/json"
"github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/xml"
transporthttp "github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http"
"github.com/google/knative-gcp/pkg/pubsub/adapter"
"github.com/google/knative-gcp/pkg/reconciler/pullsubscription/resources"
"github.com/kelseyhightower/envconfig"
"go.opencensus.io/stats/view"
"go.uber.org/zap"
"knative.dev/pkg/logging"
"knative.dev/pkg/metrics"
"knative.dev/pkg/signals"
"log"
)

const (
@@ -50,8 +41,8 @@ func main() {
panic(fmt.Sprintf("Failed to process env var: %s", err))
}

// Convert base64 encoded json logging.Config to logging.Config.
loggingConfig, err := resources.Base64ToLoggingConfig(startable.LoggingConfigBase64)
// Convert json logging.Config to logging.Config.
loggingConfig, err := logging.JsonToLoggingConfig(startable.LoggingConfigJson)
if err != nil {
fmt.Printf("[ERROR] filed to process logging config: %s", err.Error())
// Use default logging config.
@@ -65,8 +56,8 @@
defer flush(logger)
ctx := logging.WithLogger(signals.NewContext(), logger)

// Convert base64 encoded json metrics.ExporterOptions to metrics.ExporterOptions.
metricsConfig, err := resources.Base64ToMetricsOptions(startable.MetricsConfigBase64)
// Convert json metrics.ExporterOptions to metrics.ExporterOptions.
metricsConfig, err := metrics.JsonToMetricsOptions(startable.MetricsConfigJson)
if err != nil {
logger.Errorf("failed to process metrics options: %s", err.Error())
}
@@ -99,19 +90,22 @@ func mainMetrics(logger *zap.SugaredLogger, opts *metrics.ExporterOptions) {
log.Fatalf("Failed to create the metrics exporter: %v", err)
}

// Register the views
if err := view.Register(
client.LatencyView,
transporthttp.LatencyView,
json.LatencyView,
xml.LatencyView,
datacodec.LatencyView,
adapter.LatencyView,
); err != nil {
log.Fatalf("Failed to register views: %v", err)
}

view.SetReportingPeriod(2 * time.Second)
// TODO metrics are API surface, so make sure we need to expose this before doing so.
// They seem to be private ones and more profiling related ones.
// Commenting them for now, as we will use pkg stats_reporter.
// Register the views.
//if err := view.Register(
// client.LatencyView,
// transporthttp.LatencyView,
// json.LatencyView,
// xml.LatencyView,
// datacodec.LatencyView,
// adapter.LatencyView,
//); err != nil {
// log.Fatalf("Failed to register views: %v", err)
//}
//
//view.SetReportingPeriod(2 * time.Second)
}

func flush(logger *zap.SugaredLogger) {
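The receive adapter now takes its logging and metrics configuration as JSON-encoded environment variables and converts them with knative.dev/pkg helpers (logging.JsonToLoggingConfig, metrics.JsonToMetricsOptions), instead of the previous base64 helpers in the reconciler resources package. The following is a minimal sketch of that pattern using only the standard library; the struct fields and the K_METRICS_CONFIG variable name are illustrative assumptions, not the real knative.dev/pkg types.

package main

import (
	"encoding/json"
	"fmt"
	"log"
	"os"
)

// exporterOptions approximates the shape of the options the adapter decodes;
// these fields are illustrative, not the actual metrics.ExporterOptions type.
type exporterOptions struct {
	Domain    string            `json:"Domain"`
	Component string            `json:"Component"`
	ConfigMap map[string]string `json:"ConfigMap"`
}

func main() {
	// The controller serializes the options to JSON and injects them through
	// an env var; K_METRICS_CONFIG is an assumed name for this sketch.
	raw := os.Getenv("K_METRICS_CONFIG")
	if raw == "" {
		log.Fatal("metrics config env var is empty")
	}
	var opts exporterOptions
	if err := json.Unmarshal([]byte(raw), &opts); err != nil {
		log.Fatalf("failed to decode metrics options: %v", err)
	}
	fmt.Printf("metrics domain=%s component=%s\n", opts.Domain, opts.Component)
}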
31 changes: 21 additions & 10 deletions cmd/webhook/main.go
@@ -20,11 +20,10 @@ import (
"context"
"flag"
"fmt"
"log"
"net/http"

"golang.org/x/sync/errgroup"
"knative.dev/pkg/profiling"
"log"
"net/http"

"go.uber.org/zap"
"k8s.io/apimachinery/pkg/runtime/schema"
@@ -103,21 +102,33 @@ func SharedMain(handlers map[schema.GroupVersionKind]webhook.GenericCRD) {
logger.Fatalw("Failed to start the ConfigMap watcher", zap.Error(err))
}

stats, err := webhook.NewStatsReporter()
if err != nil {
logger.Fatalw("failed to initialize the stats reporter", zap.Error(err))
}

options := webhook.ControllerOptions{
ServiceName: "webhook",
DeploymentName: "webhook",
Namespace: system.Namespace(),
Port: 8443,
SecretName: "webhook-certs",
WebhookName: fmt.Sprintf("webhook.%s.events.cloud.run", system.Namespace()),
ServiceName: "webhook",
DeploymentName: "webhook",
Namespace: system.Namespace(),
Port: 8443,
SecretName: "webhook-certs",
WebhookName: fmt.Sprintf("webhook.%s.events.cloud.run", system.Namespace()),
StatsReporter: stats,
ResourceAdmissionControllerPath: "/",
}

// Decorate contexts with the current state of the config.
ctxFunc := func(ctx context.Context) context.Context {
return ctx
}

controller, err := webhook.NewAdmissionController(kubeClient, options, handlers, logger, ctxFunc, true)
resourceAdmissionController := webhook.NewResourceAdmissionController(handlers, options, true)
admissionControllers := map[string]webhook.AdmissionController{
options.ResourceAdmissionControllerPath: resourceAdmissionController,
}

controller, err := webhook.New(kubeClient, options, admissionControllers, logger, ctxFunc)

if err != nil {
logger.Fatalw("Failed to create admission controller", zap.Error(err))
2 changes: 1 addition & 1 deletion config/500-controller.yaml
@@ -60,7 +60,7 @@ spec:
- name: CONFIG_OBSERVABILITY_NAME
value: config-observability
- name: METRICS_DOMAIN
value: cloud.run/events
value: knative.dev/eventing
volumeMounts:
- name: config-logging
mountPath: /etc/config-logging
2 changes: 2 additions & 0 deletions config/500-webhook.yaml
@@ -59,6 +59,8 @@ spec:
fieldPath: metadata.namespace
- name: CONFIG_LOGGING_NAME
value: config-logging
- name: METRICS_DOMAIN
value: knative.dev/eventing
Review thread on this line:

Contributor: How will we know this is not knative eventing?

Member (author): We don't. Serving does the same, according to my understanding. Do you see a problem with this?

Contributor: Serving does not have anything like this repo as extensions, so they have not had to think about or solve this.

volumes:
- name: config-logging
configMap:
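The METRICS_DOMAIN value added above is what Knative-style exporters put in front of every exported metric name, which is the substance of the review thread: with knative.dev/eventing as the domain, this repo's webhook metrics share a namespace with Knative Eventing's. A small sketch of that naming convention, assuming the common <domain>/<component>/<metric> layout (this is not the actual knative.dev/pkg/metrics code):

package main

import (
	"fmt"
	"os"
)

// metricType composes a fully qualified metric name the way Knative-style
// exporters conventionally do: <domain>/<component>/<metric>.
func metricType(domain, component, name string) string {
	return fmt.Sprintf("%s/%s/%s", domain, component, name)
}

func main() {
	// e.g. METRICS_DOMAIN=knative.dev/eventing, as set in config/500-webhook.yaml.
	domain := os.Getenv("METRICS_DOMAIN")
	fmt.Println(metricType(domain, "webhook", "request_count"))
	// Prints "knative.dev/eventing/webhook/request_count", which is why the
	// reviewer asks how these metrics would be told apart from Eventing's own.
}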
63 changes: 1 addition & 62 deletions config/config-observability.yaml
@@ -37,80 +37,19 @@ data:
# this example block and unindented to be in the data block
# to actually change the configuration.

# logging.enable-var-log-collection defaults to false.
# A fluentd sidecar will be set up to collect var log if
# this flag is true.
logging.enable-var-log-collection: false

# logging.fluentd-sidecar-image provides the fluentd sidecar image
# to inject as a sidecar to collect logs from /var/log.
# Must be presented if logging.enable-var-log-collection is true.
logging.fluentd-sidecar-image: k8s.gcr.io/fluentd-elasticsearch:v2.0.4

# logging.fluentd-sidecar-output-config provides the configuration
# for the fluentd sidecar, which will be placed into a configmap and
# mounted into the fluentd sidecar image.
logging.fluentd-sidecar-output-config: |
# Parse json log before sending to Elastic Search
<filter **>
@type parser
key_name log
<parse>
@type multi_format
<pattern>
format json
time_key fluentd-time # fluentd-time is reserved for structured logs
time_format %Y-%m-%dT%H:%M:%S.%NZ
</pattern>
<pattern>
format none
message_key log
</pattern>
</parse>
</filter>
# Send to Elastic Search
<match **>
@id elasticsearch
@type elasticsearch
@log_level info
include_tag_key true
# Elasticsearch service is in monitoring namespace.
host elasticsearch-logging.knative-monitoring
port 9200
logstash_format true
<buffer>
@type file
path /var/log/fluentd-buffers/kubernetes.system.buffer
flush_mode interval
retry_type exponential_backoff
flush_thread_count 2
flush_interval 5s
retry_forever
retry_max_interval 30
chunk_limit_size 2M
queue_limit_length 8
overflow_action block
</buffer>
</match>

# metrics.backend-destination field specifies the system metrics destination.
# It supports either prometheus (the default) or stackdriver.
# Note: Using stackdriver will incur additional charges
metrics.backend-destination: prometheus

# metrics.request-metrics-backend-destination specifies the request metrics
# destination. If non-empty, it enables queue proxy to send request metrics.
# Currently supported values: prometheus, stackdriver.
metrics.request-metrics-backend-destination: prometheus

# metrics.stackdriver-project-id field specifies the stackdriver project ID. This
# field is optional. When running on GCE, application default credentials will be
# used if this field is not provided.
metrics.stackdriver-project-id: "<your stackdriver project id>"

# metrics.allow-stackdriver-custom-metrics indicates whether it is allowed to send metrics to
# Stackdriver using "global" resource type and custom metric type if the
# metrics are not supported by "knative_revision" resource type. Setting this
# metrics are not supported by "knative_importer" resource type. Setting this
# flag to "true" could cause extra Stackdriver charge.
# If metrics.backend-destination is not Stackdriver, this is ignored.
metrics.allow-stackdriver-custom-metrics: "false"
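The keys left in config-observability select the system metrics backend, with Prometheus as the default and Stackdriver as the alternative. A rough sketch of reading that choice from the ConfigMap data, with the fallback behavior the comments above describe (illustrative only; the real parsing lives in knative.dev/pkg/metrics):

package main

import "fmt"

// backendFromConfig picks the metrics backend from config-observability data,
// defaulting to prometheus as the ConfigMap comments describe.
func backendFromConfig(data map[string]string) string {
	switch b := data["metrics.backend-destination"]; b {
	case "stackdriver":
		return "stackdriver"
	case "", "prometheus":
		return "prometheus"
	default:
		fmt.Printf("unsupported backend %q, falling back to prometheus\n", b)
		return "prometheus"
	}
}

func main() {
	data := map[string]string{"metrics.backend-destination": "stackdriver"}
	fmt.Println(backendFromConfig(data)) // prints "stackdriver"
}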
4 changes: 0 additions & 4 deletions pkg/apis/pubsub/v1alpha1/pull_subscription_types.go
@@ -148,10 +148,6 @@ func (ps PullSubscriptionSpec) GetRetentionDuration() time.Duration {
type ModeType string

const (
// PubSubEventType is the GcpPubSub CloudEvent type, in case PullSubscription
// doesn't send a CloudEvent itself.
PubSubEventType = "google.pubsub.topic.publish"

// ModeCloudEventsBinary will use CloudEvents binary HTTP mode with
// flattened Pub/Sub payload.
ModeCloudEventsBinary ModeType = "CloudEventsBinary"
2 changes: 1 addition & 1 deletion pkg/decorator/decorator.go
@@ -70,7 +70,7 @@ func (a *Decorator) receive(ctx context.Context, event cloudevents.Event, resp *
event.SetExtension(k, v)
}

if r, err := a.outbound.Send(ctx, event); err != nil {
if _, r, err := a.outbound.Send(ctx, event); err != nil {
return err
} else if r != nil {
resp.RespondWith(200, r)