From aad2c527c5defcf89b5afab7f37274304195a6b2 Mon Sep 17 00:00:00 2001
From: rghetia
Date: Fri, 8 Nov 2019 16:00:05 -0800
Subject: [PATCH] exclude zero bucket from aggregation_data (#1183)

* exclude zero bucket from aggregation_data

* fix error string in test file.

* add one more testcase.
---
 go.mod                              |   1 -
 stats/view/aggregation.go           |   9 +-
 stats/view/aggregation_data.go      |   6 +-
 stats/view/aggregation_data_test.go |  10 ++-
 stats/view/view_to_metric_test.go   | 134 ++++++++++++++++++++++++++++
 5 files changed, 150 insertions(+), 10 deletions(-)

diff --git a/go.mod b/go.mod
index 139157cd3..c867df5f5 100644
--- a/go.mod
+++ b/go.mod
@@ -8,7 +8,6 @@ require (
 	golang.org/x/net v0.0.0-20190620200207-3b0461eec859
 	golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd // indirect
 	golang.org/x/text v0.3.2 // indirect
-	google.golang.org/appengine v1.4.0 // indirect
 	google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb // indirect
 	google.golang.org/grpc v1.20.1
 )
diff --git a/stats/view/aggregation.go b/stats/view/aggregation.go
index 8bd25314e..9d7093728 100644
--- a/stats/view/aggregation.go
+++ b/stats/view/aggregation.go
@@ -99,13 +99,14 @@ func Sum() *Aggregation {
 // If len(bounds) is 1 then there is no finite buckets, and that single
 // element is the common boundary of the overflow and underflow buckets.
 func Distribution(bounds ...float64) *Aggregation {
-	return &Aggregation{
+	agg := &Aggregation{
 		Type:    AggTypeDistribution,
 		Buckets: bounds,
-		newData: func() AggregationData {
-			return newDistributionData(bounds)
-		},
 	}
+	agg.newData = func() AggregationData {
+		return newDistributionData(agg)
+	}
+	return agg
 }
 
 // LastValue only reports the last value recorded using this
diff --git a/stats/view/aggregation_data.go b/stats/view/aggregation_data.go
index d500e67f7..f331d456e 100644
--- a/stats/view/aggregation_data.go
+++ b/stats/view/aggregation_data.go
@@ -128,12 +128,12 @@ type DistributionData struct {
 	bounds []float64 // histogram distribution of the values
 }
 
-func newDistributionData(bounds []float64) *DistributionData {
-	bucketCount := len(bounds) + 1
+func newDistributionData(agg *Aggregation) *DistributionData {
+	bucketCount := len(agg.Buckets) + 1
 	return &DistributionData{
 		CountPerBucket:     make([]int64, bucketCount),
 		ExemplarsPerBucket: make([]*metricdata.Exemplar, bucketCount),
-		bounds:             bounds,
+		bounds:             agg.Buckets,
 		Min:                math.MaxFloat64,
 		Max:                math.SmallestNonzeroFloat64,
 	}
diff --git a/stats/view/aggregation_data_test.go b/stats/view/aggregation_data_test.go
index a7e056752..7d09a8fe4 100644
--- a/stats/view/aggregation_data_test.go
+++ b/stats/view/aggregation_data_test.go
@@ -26,7 +26,10 @@ import (
 )
 
 func TestDataClone(t *testing.T) {
-	dist := newDistributionData([]float64{1, 2, 3, 4})
+	agg := &Aggregation{
+		Buckets: []float64{1, 2, 3, 4},
+	}
+	dist := newDistributionData(agg)
 	dist.Count = 7
 	dist.Max = 11
 	dist.Min = 1
@@ -66,7 +69,10 @@ func TestDataClone(t *testing.T) {
 }
 
 func TestDistributionData_addSample(t *testing.T) {
-	dd := newDistributionData([]float64{1, 2})
+	agg := &Aggregation{
+		Buckets: []float64{1, 2},
+	}
+	dd := newDistributionData(agg)
 	attachments1 := map[string]interface{}{"key1": "value1"}
 	t1 := time.Now()
 	dd.addSample(0.5, attachments1, t1)
diff --git a/stats/view/view_to_metric_test.go b/stats/view/view_to_metric_test.go
index 6c82fb9dc..18c877117 100644
--- a/stats/view/view_to_metric_test.go
+++ b/stats/view/view_to_metric_test.go
@@ -24,6 +24,7 @@ import (
 
 	"github.com/google/go-cmp/cmp"
 	"go.opencensus.io/metric/metricdata"
+	"go.opencensus.io/metric/metricexport"
"go.opencensus.io/metric/metricexport" "go.opencensus.io/stats" "go.opencensus.io/tag" ) @@ -516,6 +517,139 @@ func TestUnitConversionForAggCount(t *testing.T) { } } +type mockExp struct { + metrics []*metricdata.Metric +} + +func (me *mockExp) ExportMetrics(ctx context.Context, metrics []*metricdata.Metric) error { + me.metrics = append(me.metrics, metrics...) + return nil +} + +var _ metricexport.Exporter = (*mockExp)(nil) + +func TestViewToMetric_OutOfOrderWithZeroBuckets(t *testing.T) { + m := stats.Int64("OutOfOrderWithZeroBuckets", "", "") + now := time.Now() + tts := []struct { + v *View + m *metricdata.Metric + }{ + { + v: &View{ + Name: m.Name() + "_order1", + Measure: m, + Aggregation: Distribution(10, 0, 2), + }, + m: &metricdata.Metric{ + Descriptor: metricdata.Descriptor{ + Name: "OutOfOrderWithZeroBuckets_order1", + Unit: metricdata.UnitDimensionless, + Type: metricdata.TypeCumulativeDistribution, + LabelKeys: []metricdata.LabelKey{}, + }, + TimeSeries: []*metricdata.TimeSeries{ + {Points: []metricdata.Point{ + {Value: &metricdata.Distribution{ + Count: 3, + Sum: 9.0, + SumOfSquaredDeviation: 8, + BucketOptions: &metricdata.BucketOptions{ + Bounds: []float64{2, 10}, + }, + Buckets: []metricdata.Bucket{ + {Count: 1, Exemplar: nil}, + {Count: 2, Exemplar: nil}, + {Count: 0, Exemplar: nil}, + }, + }, + Time: now, + }, + }, + StartTime: now, + LabelValues: []metricdata.LabelValue{}, + }, + }, + }, + }, + { + v: &View{ + Name: m.Name() + "_order2", + Measure: m, + Aggregation: Distribution(0, 5, 10), + }, + m: &metricdata.Metric{ + Descriptor: metricdata.Descriptor{ + Name: "OutOfOrderWithZeroBuckets_order2", + Unit: metricdata.UnitDimensionless, + Type: metricdata.TypeCumulativeDistribution, + LabelKeys: []metricdata.LabelKey{}, + }, + TimeSeries: []*metricdata.TimeSeries{ + {Points: []metricdata.Point{ + {Value: &metricdata.Distribution{ + Count: 3, + Sum: 9.0, + SumOfSquaredDeviation: 8, + BucketOptions: &metricdata.BucketOptions{ + Bounds: []float64{5, 10}, + }, + Buckets: []metricdata.Bucket{ + {Count: 2, Exemplar: nil}, + {Count: 1, Exemplar: nil}, + {Count: 0, Exemplar: nil}, + }, + }, + Time: now, + }, + }, + StartTime: now, + LabelValues: []metricdata.LabelValue{}, + }, + }, + }, + }, + } + for _, tt := range tts { + err := Register(tt.v) + if err != nil { + t.Fatalf("error registering view %v, err: %v", tt.v, err) + } + + } + + stats.Record(context.Background(), m.M(5), m.M(1), m.M(3)) + time.Sleep(1 * time.Second) + + me := &mockExp{} + reader := metricexport.NewReader() + reader.ReadAndExport(me) + + var got *metricdata.Metric + lookup := func(vname string, metrics []*metricdata.Metric) *metricdata.Metric { + for _, m := range metrics { + if m.Descriptor.Name == vname { + return m + } + } + return nil + } + + for _, tt := range tts { + got = lookup(tt.v.Name, me.metrics) + if got == nil { + t.Fatalf("metric %s not found in %v\n", tt.v.Name, me.metrics) + } + got.TimeSeries[0].Points[0].Time = now + got.TimeSeries[0].StartTime = now + + want := tt.m + if diff := cmp.Diff(got, want); diff != "" { + t.Errorf("buckets differ -got +want: %s \n Serialized got %v\n, Serialized want %v\n", diff, serializeAsJSON(got), serializeAsJSON(want)) + } + } +} + func serializeAsJSON(v interface{}) string { blob, _ := json.MarshalIndent(v, "", " ") return string(blob)