From 9486334cf7568859653813ce4a8f1fa4e2840f80 Mon Sep 17 00:00:00 2001 From: dmathieu <42@dmathieu.com> Date: Fri, 6 Sep 2024 15:12:28 +0200 Subject: [PATCH 01/18] add Marshal/Unmarshal proto to pprofile --- pdata/pprofile/pb.go | 31 ++++++++++++++++ pdata/pprofile/pb_test.go | 78 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 109 insertions(+) create mode 100644 pdata/pprofile/pb.go create mode 100644 pdata/pprofile/pb_test.go diff --git a/pdata/pprofile/pb.go b/pdata/pprofile/pb.go new file mode 100644 index 00000000000..b9c8bacd14c --- /dev/null +++ b/pdata/pprofile/pb.go @@ -0,0 +1,31 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package pprofile // import "go.opentelemetry.io/collector/pdata/pprofile" + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlpprofile "go.opentelemetry.io/collector/pdata/internal/data/protogen/profiles/v1experimental" +) + +var _ MarshalSizer = (*ProtoMarshaler)(nil) + +type ProtoMarshaler struct{} + +func (e *ProtoMarshaler) MarshalProfiles(td Profiles) ([]byte, error) { + pb := internal.ProfilesToProto(internal.Profiles(td)) + return pb.Marshal() +} + +func (e *ProtoMarshaler) ProfilesSize(td Profiles) int { + pb := internal.ProfilesToProto(internal.Profiles(td)) + return pb.Size() +} + +type ProtoUnmarshaler struct{} + +func (d *ProtoUnmarshaler) UnmarshalProfiles(buf []byte) (Profiles, error) { + pb := otlpprofile.ProfilesData{} + err := pb.Unmarshal(buf) + return Profiles(internal.ProfilesFromProto(pb)), err +} diff --git a/pdata/pprofile/pb_test.go b/pdata/pprofile/pb_test.go new file mode 100644 index 00000000000..622510a629c --- /dev/null +++ b/pdata/pprofile/pb_test.go @@ -0,0 +1,78 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package pprofile + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestProtoProfilesUnmarshalerError(t *testing.T) { + p := &ProtoUnmarshaler{} + _, err := p.UnmarshalProfiles([]byte("+$%")) + assert.Error(t, err) +} + +func TestProtoSizer(t *testing.T) { + marshaler := &ProtoMarshaler{} + td := NewProfiles() + td.ResourceProfiles().AppendEmpty(). + ScopeProfiles().AppendEmpty(). + Profiles().AppendEmpty(). + Profile(). 
+ StringTable().Append("foobar") + + size := marshaler.ProfilesSize(td) + + bytes, err := marshaler.MarshalProfiles(td) + require.NoError(t, err) + assert.Equal(t, len(bytes), size) +} + +func TestProtoSizerEmptyProfiles(t *testing.T) { + sizer := &ProtoMarshaler{} + assert.Equal(t, 0, sizer.ProfilesSize(NewProfiles())) +} + +func BenchmarkProfilesToProto(b *testing.B) { + marshaler := &ProtoMarshaler{} + profiles := generateBenchmarkProfiles(128) + b.ResetTimer() + for n := 0; n < b.N; n++ { + buf, err := marshaler.MarshalProfiles(profiles) + require.NoError(b, err) + assert.NotEmpty(b, buf) + } +} + +func BenchmarkProfilesFromProto(b *testing.B) { + marshaler := &ProtoMarshaler{} + unmarshaler := &ProtoUnmarshaler{} + baseProfiles := generateBenchmarkProfiles(128) + buf, err := marshaler.MarshalProfiles(baseProfiles) + require.NoError(b, err) + assert.NotEmpty(b, buf) + b.ResetTimer() + b.ReportAllocs() + for n := 0; n < b.N; n++ { + profiles, err := unmarshaler.UnmarshalProfiles(buf) + require.NoError(b, err) + assert.Equal(b, baseProfiles.ResourceProfiles().Len(), profiles.ResourceProfiles().Len()) + } +} + +func generateBenchmarkProfiles(samplesCount int) Profiles { + md := NewProfiles() + ilm := md.ResourceProfiles().AppendEmpty().ScopeProfiles().AppendEmpty().Profiles().AppendEmpty().Profile() + ilm.Sample().EnsureCapacity(samplesCount) + for i := 0; i < samplesCount; i++ { + im := ilm.Sample().AppendEmpty() + im.LocationIndex().Append(1) + im.SetLocationsStartIndex(2) + im.SetLocationsLength(10) + } + return md +} From 28135c1b8962b5c9e06a305ad8b3c9078f1767f3 Mon Sep 17 00:00:00 2001 From: dmathieu <42@dmathieu.com> Date: Mon, 9 Sep 2024 13:39:28 +0200 Subject: [PATCH 02/18] introduce consumererror profiles --- .../consumererrorprofiles/Makefile | 1 + .../consumererrorprofiles/go.mod | 30 +++++++ .../consumererrorprofiles/go.sum | 83 +++++++++++++++++++ .../consumererrorprofiles/signalerrors.go | 25 ++++++ .../signalerrors_test.go | 46 ++++++++++ consumer/consumererror/internal/retryable.go | 31 +++++++ consumer/consumererror/signalerrors.go | 40 +++------ 7 files changed, 229 insertions(+), 27 deletions(-) create mode 100644 consumer/consumererror/consumererrorprofiles/Makefile create mode 100644 consumer/consumererror/consumererrorprofiles/go.mod create mode 100644 consumer/consumererror/consumererrorprofiles/go.sum create mode 100644 consumer/consumererror/consumererrorprofiles/signalerrors.go create mode 100644 consumer/consumererror/consumererrorprofiles/signalerrors_test.go create mode 100644 consumer/consumererror/internal/retryable.go diff --git a/consumer/consumererror/consumererrorprofiles/Makefile b/consumer/consumererror/consumererrorprofiles/Makefile new file mode 100644 index 00000000000..bdd863a203b --- /dev/null +++ b/consumer/consumererror/consumererrorprofiles/Makefile @@ -0,0 +1 @@ +include ../../../Makefile.Common diff --git a/consumer/consumererror/consumererrorprofiles/go.mod b/consumer/consumererror/consumererrorprofiles/go.mod new file mode 100644 index 00000000000..d7ed72753dd --- /dev/null +++ b/consumer/consumererror/consumererrorprofiles/go.mod @@ -0,0 +1,30 @@ +module go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles + +go 1.22.0 + +require ( + github.com/stretchr/testify v1.9.0 + go.opentelemetry.io/collector/consumer v0.0.0-00010101000000-000000000000 + go.opentelemetry.io/collector/pdata/pprofile v0.108.1 + go.opentelemetry.io/collector/pdata/testdata v0.108.1 +) + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + 
github.com/gogo/protobuf v1.3.2 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + go.opentelemetry.io/collector/pdata v1.14.1 // indirect + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/net v0.26.0 // indirect + golang.org/x/sys v0.21.0 // indirect + golang.org/x/text v0.16.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117 // indirect + google.golang.org/grpc v1.66.0 // indirect + google.golang.org/protobuf v1.34.2 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) + +replace go.opentelemetry.io/collector/consumer => ../.. diff --git a/consumer/consumererror/consumererrorprofiles/go.sum b/consumer/consumererror/consumererrorprofiles/go.sum new file mode 100644 index 00000000000..5117c125c5e --- /dev/null +++ b/consumer/consumererror/consumererrorprofiles/go.sum @@ -0,0 +1,83 @@ +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.9.0 
h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.opentelemetry.io/collector/pdata v1.14.1 h1:wXZjtQA7Vy5HFqco+yA95ENyMQU5heBB1IxMHQf6mUk= +go.opentelemetry.io/collector/pdata v1.14.1/go.mod h1:z1dTjwwtcoXxZx2/nkHysjxMeaxe9pEmYTEr4SMNIx8= +go.opentelemetry.io/collector/pdata/pprofile v0.108.1 h1:/XbunfZ+/jt1+d1p4zM4vZ/AgeaIJsayjYdlN1fV+tk= +go.opentelemetry.io/collector/pdata/pprofile v0.108.1/go.mod h1:/GvG2WcN9Dajlw4QaIOjgz7N32wSfPL3qxJ0BKOcVPo= +go.opentelemetry.io/collector/pdata/testdata v0.108.1 h1:TpBDoBMBYvC/Ibswe3Ec2eof8XrRrEec6+tfnTeTSGk= +go.opentelemetry.io/collector/pdata/testdata v0.108.1/go.mod h1:PdUmBA4yDRD4Wf0fpCyrpdZexz9EDoHBw5Ot4iIUPRs= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= +golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117 h1:1GBuWVLM/KMVUv1t1En5Gs+gFZCNd360GGb4sSxtrhU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= +google.golang.org/grpc v1.66.0 h1:DibZuoBznOxbDQxRINckZcUvnCEvrW9pcWIE2yF9r1c= +google.golang.org/grpc v1.66.0/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/consumer/consumererror/consumererrorprofiles/signalerrors.go b/consumer/consumererror/consumererrorprofiles/signalerrors.go new file mode 100644 index 00000000000..5c01bb64814 --- /dev/null +++ b/consumer/consumererror/consumererrorprofiles/signalerrors.go @@ -0,0 +1,25 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package consumererrorprofiles // import "go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles" + +import ( + "go.opentelemetry.io/collector/consumer/consumererror/internal" + "go.opentelemetry.io/collector/pdata/pprofile" +) + +// Profiles is an error that may carry associated Profile data for a subset of received data +// that failed to be processed or sent. +type Profiles struct { + internal.Retryable[pprofile.Profiles] +} + +// NewProfiles creates a Profiles that can encapsulate received data that failed to be processed or sent. 
+func NewProfiles(err error, data pprofile.Profiles) error { + return Profiles{ + Retryable: internal.Retryable[pprofile.Profiles]{ + Err: err, + Value: data, + }, + } +} diff --git a/consumer/consumererror/consumererrorprofiles/signalerrors_test.go b/consumer/consumererror/consumererrorprofiles/signalerrors_test.go new file mode 100644 index 00000000000..c9ea6b3ab9d --- /dev/null +++ b/consumer/consumererror/consumererrorprofiles/signalerrors_test.go @@ -0,0 +1,46 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package consumererrorprofiles // import "go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles" + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/collector/pdata/testdata" +) + +func TestProfiles(t *testing.T) { + td := testdata.GenerateProfiles(1) + err := errors.New("some error") + profileErr := NewProfiles(err, td) + assert.Equal(t, err.Error(), profileErr.Error()) + var target Profiles + assert.False(t, errors.As(nil, &target)) + assert.False(t, errors.As(err, &target)) + assert.True(t, errors.As(profileErr, &target)) + assert.Equal(t, td, target.Data()) +} + +func TestProfiles_Unwrap(t *testing.T) { + td := testdata.GenerateProfiles(1) + var err error = testErrorType{"some error"} + // Wrapping err with error Profiles. + profileErr := NewProfiles(err, td) + target := testErrorType{} + require.NotEqual(t, err, target) + // Unwrapping profileErr for err and assigning to target. + require.True(t, errors.As(profileErr, &target)) + require.Equal(t, err, target) +} + +type testErrorType struct { + s string +} + +func (t testErrorType) Error() string { + return "" +} diff --git a/consumer/consumererror/internal/retryable.go b/consumer/consumererror/internal/retryable.go new file mode 100644 index 00000000000..feed1bc5bc7 --- /dev/null +++ b/consumer/consumererror/internal/retryable.go @@ -0,0 +1,31 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/collector/consumer/consumererror/internal" + +import ( + "go.opentelemetry.io/collector/pdata/plog" + "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/pprofile" + "go.opentelemetry.io/collector/pdata/ptrace" +) + +type Retryable[V ptrace.Traces | pmetric.Metrics | plog.Logs | pprofile.Profiles] struct { + Err error + Value V +} + +// Error provides the error message +func (err Retryable[V]) Error() string { + return err.Err.Error() +} + +// Unwrap returns the wrapped error for functions Is and As in standard package errors. +func (err Retryable[V]) Unwrap() error { + return err.Err +} + +// Data returns the telemetry data that failed to be processed or sent. 
+func (err Retryable[V]) Data() V { + return err.Value +} diff --git a/consumer/consumererror/signalerrors.go b/consumer/consumererror/signalerrors.go index 1d7558ce1ca..69af253dae7 100644 --- a/consumer/consumererror/signalerrors.go +++ b/consumer/consumererror/signalerrors.go @@ -4,38 +4,24 @@ package consumererror // import "go.opentelemetry.io/collector/consumer/consumererror" import ( + "go.opentelemetry.io/collector/consumer/consumererror/internal" "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/pdata/ptrace" ) -type retryable[V ptrace.Traces | pmetric.Metrics | plog.Logs] struct { - error - data V -} - -// Unwrap returns the wrapped error for functions Is and As in standard package errors. -func (err retryable[V]) Unwrap() error { - return err.error -} - -// Data returns the telemetry data that failed to be processed or sent. -func (err retryable[V]) Data() V { - return err.data -} - // Traces is an error that may carry associated Trace data for a subset of received data // that failed to be processed or sent. type Traces struct { - retryable[ptrace.Traces] + internal.Retryable[ptrace.Traces] } // NewTraces creates a Traces that can encapsulate received data that failed to be processed or sent. func NewTraces(err error, data ptrace.Traces) error { return Traces{ - retryable: retryable[ptrace.Traces]{ - error: err, - data: data, + Retryable: internal.Retryable[ptrace.Traces]{ + Err: err, + Value: data, }, } } @@ -43,15 +29,15 @@ func NewTraces(err error, data ptrace.Traces) error { // Logs is an error that may carry associated Log data for a subset of received data // that failed to be processed or sent. type Logs struct { - retryable[plog.Logs] + internal.Retryable[plog.Logs] } // NewLogs creates a Logs that can encapsulate received data that failed to be processed or sent. func NewLogs(err error, data plog.Logs) error { return Logs{ - retryable: retryable[plog.Logs]{ - error: err, - data: data, + Retryable: internal.Retryable[plog.Logs]{ + Err: err, + Value: data, }, } } @@ -59,15 +45,15 @@ func NewLogs(err error, data plog.Logs) error { // Metrics is an error that may carry associated Metrics data for a subset of received data // that failed to be processed or sent. type Metrics struct { - retryable[pmetric.Metrics] + internal.Retryable[pmetric.Metrics] } // NewMetrics creates a Metrics that can encapsulate received data that failed to be processed or sent. 
func NewMetrics(err error, data pmetric.Metrics) error { return Metrics{ - retryable: retryable[pmetric.Metrics]{ - error: err, - data: data, + Retryable: internal.Retryable[pmetric.Metrics]{ + Err: err, + Value: data, }, } } From 09f5ec3a39becf8ffa4a19851459f5deef455c96 Mon Sep 17 00:00:00 2001 From: dmathieu <42@dmathieu.com> Date: Tue, 10 Sep 2024 10:19:45 +0200 Subject: [PATCH 03/18] add profiles to mdatagen validation --- cmd/mdatagen/validate.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/cmd/mdatagen/validate.go b/cmd/mdatagen/validate.go index 63108f29470..133cbcfc444 100644 --- a/cmd/mdatagen/validate.go +++ b/cmd/mdatagen/validate.go @@ -95,15 +95,23 @@ func (s *Status) validateStability() error { if c != "metrics" && c != "traces" && c != "logs" && + c != "profiles" && c != "traces_to_traces" && c != "traces_to_metrics" && c != "traces_to_logs" && + c != "traces_to_profiles" && c != "metrics_to_traces" && c != "metrics_to_metrics" && c != "metrics_to_logs" && + c != "metrics_to_profiles" && c != "logs_to_traces" && c != "logs_to_metrics" && c != "logs_to_logs" && + c != "logs_to_profiles" && + c != "profiles_to_profiles" && + c != "profiles_to_traces" && + c != "profiles_to_metrics" && + c != "profiles_to_logs" && c != "extension" { errs = errors.Join(errs, fmt.Errorf("invalid component: %v", c)) } From 1a8b0f629bfeef582241ebab44d8dad5fd8f3476 Mon Sep 17 00:00:00 2001 From: dmathieu <42@dmathieu.com> Date: Fri, 6 Sep 2024 15:37:17 +0200 Subject: [PATCH 04/18] generate samples in profiles test data --- pdata/testdata/profile.go | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/pdata/testdata/profile.go b/pdata/testdata/profile.go index 67233b700c6..9a85b3373e0 100644 --- a/pdata/testdata/profile.go +++ b/pdata/testdata/profile.go @@ -37,10 +37,28 @@ func fillProfileOne(profile pprofile.ProfileContainer) { profile.SetStartTime(profileStartTimestamp) profile.SetEndTime(profileEndTimestamp) profile.SetDroppedAttributesCount(1) + + sample := profile.Profile().Sample().AppendEmpty() + sample.LocationIndex().Append(1) + sample.SetLocationsStartIndex(2) + sample.SetLocationsLength(10) + sample.SetStacktraceIdIndex(3) + sample.Value().Append(4) + sample.SetLink(42) + sample.Attributes().Append(5) } func fillProfileTwo(profile pprofile.ProfileContainer) { profile.SetProfileID([16]byte{0x02, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10}) profile.SetStartTime(profileStartTimestamp) profile.SetEndTime(profileEndTimestamp) + + sample := profile.Profile().Sample().AppendEmpty() + sample.LocationIndex().Append(6) + sample.SetLocationsStartIndex(7) + sample.SetLocationsLength(20) + sample.SetStacktraceIdIndex(8) + sample.Value().Append(9) + sample.SetLink(44) + sample.Attributes().Append(10) } From dcf33e9663fbeff3beee774cbc655453df58da30 Mon Sep 17 00:00:00 2001 From: dmathieu <42@dmathieu.com> Date: Tue, 10 Sep 2024 11:14:48 +0200 Subject: [PATCH 05/18] add sample count in consumer test --- consumer/consumertest/sink.go | 14 ++++++++++++-- consumer/consumertest/sink_test.go | 2 ++ 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/consumer/consumertest/sink.go b/consumer/consumertest/sink.go index ec35e717ae0..a6d2a424ee1 100644 --- a/consumer/consumertest/sink.go +++ b/consumer/consumertest/sink.go @@ -163,8 +163,9 @@ func (sle *LogsSink) Reset() { // stores all profiles and allows querying them for testing. 
type ProfilesSink struct { nonMutatingConsumer - mu sync.Mutex - profiles []pprofile.Profiles + mu sync.Mutex + profiles []pprofile.Profiles + sampleCount int } var _ consumerprofiles.Profiles = (*ProfilesSink)(nil) @@ -175,6 +176,7 @@ func (ste *ProfilesSink) ConsumeProfiles(_ context.Context, td pprofile.Profiles defer ste.mu.Unlock() ste.profiles = append(ste.profiles, td) + ste.sampleCount += td.SampleCount() return nil } @@ -189,10 +191,18 @@ func (ste *ProfilesSink) AllProfiles() []pprofile.Profiles { return copyProfiles } +// ProfileRecordCount returns the number of profiles stored by this sink since last Reset. +func (ste *ProfilesSink) SampleCount() int { + ste.mu.Lock() + defer ste.mu.Unlock() + return ste.sampleCount +} + // Reset deletes any stored data. func (ste *ProfilesSink) Reset() { ste.mu.Lock() defer ste.mu.Unlock() ste.profiles = nil + ste.sampleCount = 0 } diff --git a/consumer/consumertest/sink_test.go b/consumer/consumertest/sink_test.go index 3a377345fc4..5d7f7f3bf8a 100644 --- a/consumer/consumertest/sink_test.go +++ b/consumer/consumertest/sink_test.go @@ -71,6 +71,8 @@ func TestProfilesSink(t *testing.T) { want = append(want, td) } assert.Equal(t, want, sink.AllProfiles()) + assert.Equal(t, len(want), sink.SampleCount()) sink.Reset() assert.Empty(t, sink.AllProfiles()) + assert.Empty(t, sink.SampleCount()) } From aae4a9b89675a34b6001367a225e90394af4c109 Mon Sep 17 00:00:00 2001 From: dmathieu <42@dmathieu.com> Date: Tue, 10 Sep 2024 11:32:45 +0200 Subject: [PATCH 06/18] add sample metrics to componenttest --- component/componenttest/obsreporttest.go | 10 ++++++++++ component/componenttest/otelprometheuschecker.go | 4 ++++ component/componenttest/testdata/prometheus_response | 6 ++++++ 3 files changed, 20 insertions(+) diff --git a/component/componenttest/obsreporttest.go b/component/componenttest/obsreporttest.go index 9e780315017..76d77adc9f9 100644 --- a/component/componenttest/obsreporttest.go +++ b/component/componenttest/obsreporttest.go @@ -56,6 +56,12 @@ func (tts *TestTelemetry) CheckExporterMetrics(sentMetricsPoints, sendFailedMetr return tts.prometheusChecker.checkExporterMetrics(tts.id, sentMetricsPoints, sendFailedMetricsPoints) } +// CheckExporterProfiles checks that for the current exported values for profiles exporter metrics match given values. +// Note: SetupTelemetry must be called before this function. +func (tts *TestTelemetry) CheckExporterProfiles(sentSamples, sendFailedSamples int64) error { + return tts.prometheusChecker.checkExporterProfiles(tts.id, sentSamples, sendFailedSamples) +} + func (tts *TestTelemetry) CheckExporterEnqueueFailedMetrics(enqueueFailed int64) error { return tts.prometheusChecker.checkExporterEnqueueFailed(tts.id, "metric_points", enqueueFailed) } @@ -68,6 +74,10 @@ func (tts *TestTelemetry) CheckExporterEnqueueFailedLogs(enqueueFailed int64) er return tts.prometheusChecker.checkExporterEnqueueFailed(tts.id, "log_records", enqueueFailed) } +func (tts *TestTelemetry) CheckExporterEnqueueFailedProfiles(enqueueFailed int64) error { + return tts.prometheusChecker.checkExporterEnqueueFailed(tts.id, "samples", enqueueFailed) +} + // CheckExporterLogs checks that for the current exported values for logs exporter metrics match given values. // Note: SetupTelemetry must be called before this function. 
func (tts *TestTelemetry) CheckExporterLogs(sentLogRecords, sendFailedLogRecords int64) error { diff --git a/component/componenttest/otelprometheuschecker.go b/component/componenttest/otelprometheuschecker.go index 6a63617c206..aea40dcad89 100644 --- a/component/componenttest/otelprometheuschecker.go +++ b/component/componenttest/otelprometheuschecker.go @@ -82,6 +82,10 @@ func (pc *prometheusChecker) checkExporterMetrics(exporter component.ID, sent, s return pc.checkExporter(exporter, "metric_points", sent, sendFailed) } +func (pc *prometheusChecker) checkExporterProfiles(exporter component.ID, sent, sendFailed int64) error { + return pc.checkExporter(exporter, "samples", sent, sendFailed) +} + func (pc *prometheusChecker) checkExporter(exporter component.ID, datatype string, sent, sendFailed int64) error { exporterAttrs := attributesForExporterMetrics(exporter) errs := pc.checkCounter(fmt.Sprintf("exporter_sent_%s", datatype), sent, exporterAttrs) diff --git a/component/componenttest/testdata/prometheus_response b/component/componenttest/testdata/prometheus_response index 9d0eb69ee7f..4243df1d5ef 100644 --- a/component/componenttest/testdata/prometheus_response +++ b/component/componenttest/testdata/prometheus_response @@ -16,6 +16,12 @@ otelcol_exporter_send_failed_log_records{exporter="fakeExporter"} 36 # HELP otelcol_exporter_sent_log_records Number of logs successfully sent to destination. # TYPE otelcol_exporter_sent_log_records counter otelcol_exporter_sent_log_records{exporter="fakeExporter"} 103 +# HELP otelcol_exporter_send_failed_samples Number of samples in failed attempts to send to destination. +# TYPE otelcol_exporter_send_failed_samples counter +otelcol_exporter_send_failed_samples{exporter="fakeExporter"} 14 +# HELP otelcol_exporter_sent_samples Number of samples successfully sent to destination. +# TYPE otelcol_exporter_sent_samples counter +otelcol_exporter_sent_samples{exporter="fakeExporter"} 43 # HELP otelcol_processor_accepted_spans Number of spans successfully pushed into the next component in the pipeline. 
# TYPE otelcol_processor_accepted_spans counter otelcol_processor_accepted_spans{processor="fakeProcessor"} 42 From 492e3cbd652e724a9aed11e4d3660b1b5d4c2f6c Mon Sep 17 00:00:00 2001 From: dmathieu <42@dmathieu.com> Date: Wed, 11 Sep 2024 10:45:54 +0200 Subject: [PATCH 07/18] add profiles in exporterhelper --- exporter/exporterhelper/constants.go | 4 + exporter/exporterhelper/documentation.md | 24 + .../internal/metadata/generated_telemetry.go | 21 + exporter/exporterhelper/metadata.yaml | 26 +- exporter/exporterhelper/obsexporter.go | 20 + exporter/exporterhelper/profiles.go | 166 +++++++ exporter/exporterhelper/profiles_batch.go | 142 ++++++ .../exporterhelper/profiles_batch_test.go | 154 ++++++ exporter/exporterhelper/profiles_test.go | 441 ++++++++++++++++++ exporter/exporterhelper/request_test.go | 14 +- 10 files changed, 1007 insertions(+), 5 deletions(-) create mode 100644 exporter/exporterhelper/profiles.go create mode 100644 exporter/exporterhelper/profiles_batch.go create mode 100644 exporter/exporterhelper/profiles_batch_test.go create mode 100644 exporter/exporterhelper/profiles_test.go diff --git a/exporter/exporterhelper/constants.go b/exporter/exporterhelper/constants.go index 57829f08c04..74d7f0a4b7b 100644 --- a/exporter/exporterhelper/constants.go +++ b/exporter/exporterhelper/constants.go @@ -24,4 +24,8 @@ var ( errNilMetricsConverter = errors.New("nil RequestFromMetricsFunc") // errNilLogsConverter is returned when a nil RequestFromLogsFunc is given. errNilLogsConverter = errors.New("nil RequestFromLogsFunc") + // errNilPushProfileData is returned when a nil PushProfiles is given. + errNilPushProfileData = errors.New("nil PushProfiles") + // errNilProfilesConverter is returned when a nil RequestFromProfilesFunc is given. + errNilProfilesConverter = errors.New("nil RequestFromProfilesFunc") ) diff --git a/exporter/exporterhelper/documentation.md b/exporter/exporterhelper/documentation.md index a82163a2bfa..6a5bc55db24 100644 --- a/exporter/exporterhelper/documentation.md +++ b/exporter/exporterhelper/documentation.md @@ -22,6 +22,14 @@ Number of metric points failed to be added to the sending queue. | ---- | ----------- | ---------- | --------- | | {datapoints} | Sum | Int | true | +### otelcol_exporter_enqueue_failed_samples + +Number of samples failed to be added to the sending queue. + +| Unit | Metric Type | Value Type | Monotonic | +| ---- | ----------- | ---------- | --------- | +| {samples} | Sum | Int | true | + ### otelcol_exporter_enqueue_failed_spans Number of spans failed to be added to the sending queue. @@ -62,6 +70,14 @@ Number of metric points in failed attempts to send to destination. | ---- | ----------- | ---------- | --------- | | {datapoints} | Sum | Int | true | +### otelcol_exporter_send_failed_samples + +Number of samples in failed attempts to send to destination. + +| Unit | Metric Type | Value Type | Monotonic | +| ---- | ----------- | ---------- | --------- | +| {samples} | Sum | Int | true | + ### otelcol_exporter_send_failed_spans Number of spans in failed attempts to send to destination. @@ -86,6 +102,14 @@ Number of metric points successfully sent to destination. | ---- | ----------- | ---------- | --------- | | {datapoints} | Sum | Int | true | +### otelcol_exporter_sent_samples + +Number of samples successfully sent to destination. 
+ +| Unit | Metric Type | Value Type | Monotonic | +| ---- | ----------- | ---------- | --------- | +| {samples} | Sum | Int | true | + ### otelcol_exporter_sent_spans Number of spans successfully sent to destination. diff --git a/exporter/exporterhelper/internal/metadata/generated_telemetry.go b/exporter/exporterhelper/internal/metadata/generated_telemetry.go index a35625ac0a5..4ae52d8f1fa 100644 --- a/exporter/exporterhelper/internal/metadata/generated_telemetry.go +++ b/exporter/exporterhelper/internal/metadata/generated_telemetry.go @@ -32,14 +32,17 @@ type TelemetryBuilder struct { meter metric.Meter ExporterEnqueueFailedLogRecords metric.Int64Counter ExporterEnqueueFailedMetricPoints metric.Int64Counter + ExporterEnqueueFailedSamples metric.Int64Counter ExporterEnqueueFailedSpans metric.Int64Counter ExporterQueueCapacity metric.Int64ObservableGauge ExporterQueueSize metric.Int64ObservableGauge ExporterSendFailedLogRecords metric.Int64Counter ExporterSendFailedMetricPoints metric.Int64Counter + ExporterSendFailedSamples metric.Int64Counter ExporterSendFailedSpans metric.Int64Counter ExporterSentLogRecords metric.Int64Counter ExporterSentMetricPoints metric.Int64Counter + ExporterSentSamples metric.Int64Counter ExporterSentSpans metric.Int64Counter meters map[configtelemetry.Level]metric.Meter } @@ -104,6 +107,12 @@ func NewTelemetryBuilder(settings component.TelemetrySettings, options ...teleme metric.WithUnit("{datapoints}"), ) errs = errors.Join(errs, err) + builder.ExporterEnqueueFailedSamples, err = builder.meters[configtelemetry.LevelBasic].Int64Counter( + "otelcol_exporter_enqueue_failed_samples", + metric.WithDescription("Number of samples failed to be added to the sending queue."), + metric.WithUnit("{samples}"), + ) + errs = errors.Join(errs, err) builder.ExporterEnqueueFailedSpans, err = builder.meters[configtelemetry.LevelBasic].Int64Counter( "otelcol_exporter_enqueue_failed_spans", metric.WithDescription("Number of spans failed to be added to the sending queue."), @@ -122,6 +131,12 @@ func NewTelemetryBuilder(settings component.TelemetrySettings, options ...teleme metric.WithUnit("{datapoints}"), ) errs = errors.Join(errs, err) + builder.ExporterSendFailedSamples, err = builder.meters[configtelemetry.LevelBasic].Int64Counter( + "otelcol_exporter_send_failed_samples", + metric.WithDescription("Number of samples in failed attempts to send to destination."), + metric.WithUnit("{samples}"), + ) + errs = errors.Join(errs, err) builder.ExporterSendFailedSpans, err = builder.meters[configtelemetry.LevelBasic].Int64Counter( "otelcol_exporter_send_failed_spans", metric.WithDescription("Number of spans in failed attempts to send to destination."), @@ -140,6 +155,12 @@ func NewTelemetryBuilder(settings component.TelemetrySettings, options ...teleme metric.WithUnit("{datapoints}"), ) errs = errors.Join(errs, err) + builder.ExporterSentSamples, err = builder.meters[configtelemetry.LevelBasic].Int64Counter( + "otelcol_exporter_sent_samples", + metric.WithDescription("Number of samples successfully sent to destination."), + metric.WithUnit("{samples}"), + ) + errs = errors.Join(errs, err) builder.ExporterSentSpans, err = builder.meters[configtelemetry.LevelBasic].Int64Counter( "otelcol_exporter_sent_spans", metric.WithDescription("Number of spans successfully sent to destination."), diff --git a/exporter/exporterhelper/metadata.yaml b/exporter/exporterhelper/metadata.yaml index 9156cb95c80..770b3a431f5 100644 --- a/exporter/exporterhelper/metadata.yaml +++ 
b/exporter/exporterhelper/metadata.yaml @@ -5,7 +5,7 @@ status: class: exporter not_component: true stability: - beta: [traces, metrics, logs] + beta: [traces, metrics, logs, profiles] distributions: [core, contrib] telemetry: @@ -82,6 +82,30 @@ telemetry: value_type: int monotonic: true + exporter_sent_samples: + enabled: true + description: Number of samples successfully sent to destination. + unit: "{samples}" + sum: + value_type: int + monotonic: true + + exporter_send_failed_samples: + enabled: true + description: Number of samples in failed attempts to send to destination. + unit: "{samples}" + sum: + value_type: int + monotonic: true + + exporter_enqueue_failed_samples: + enabled: true + description: Number of samples failed to be added to the sending queue. + unit: "{samples}" + sum: + value_type: int + monotonic: true + exporter_queue_size: enabled: true description: Current size of the retry queue (in batches) diff --git a/exporter/exporterhelper/obsexporter.go b/exporter/exporterhelper/obsexporter.go index 751aaf1aac5..05de9fb96f8 100644 --- a/exporter/exporterhelper/obsexporter.go +++ b/exporter/exporterhelper/obsexporter.go @@ -12,6 +12,7 @@ import ( "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componentprofiles" "go.opentelemetry.io/collector/exporter" "go.opentelemetry.io/collector/exporter/exporterhelper/internal/metadata" "go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics" @@ -96,6 +97,20 @@ func (or *obsReport) endLogsOp(ctx context.Context, numLogRecords int, err error endSpan(ctx, err, numSent, numFailedToSend, obsmetrics.SentLogRecordsKey, obsmetrics.FailedToSendLogRecordsKey) } +// startProfilesOp is called at the start of an Export operation. +// The returned context should be used in other calls to the Exporter functions +// dealing with the same export operation. +func (or *obsReport) startProfilesOp(ctx context.Context) context.Context { + return or.startOp(ctx, obsmetrics.ExportTraceDataOperationSuffix) +} + +// endProfilesOp completes the export operation that was started with startProfilesOp. +func (or *obsReport) endProfilesOp(ctx context.Context, numSpans int, err error) { + numSent, numFailedToSend := toNumItems(numSpans, err) + or.recordMetrics(context.WithoutCancel(ctx), componentprofiles.DataTypeProfiles, numSent, numFailedToSend) + endSpan(ctx, err, numSent, numFailedToSend, obsmetrics.SentSamplesKey, obsmetrics.FailedToSendSamplesKey) +} + // startOp creates the span used to trace the operation. Returning // the updated context and the created span. 
func (or *obsReport) startOp(ctx context.Context, operationSuffix string) context.Context { @@ -116,6 +131,9 @@ func (or *obsReport) recordMetrics(ctx context.Context, dataType component.DataT case component.DataTypeLogs: sentMeasure = or.telemetryBuilder.ExporterSentLogRecords failedMeasure = or.telemetryBuilder.ExporterSendFailedLogRecords + case componentprofiles.DataTypeProfiles: + sentMeasure = or.telemetryBuilder.ExporterSentSamples + failedMeasure = or.telemetryBuilder.ExporterSendFailedSamples } sentMeasure.Add(ctx, sent, metric.WithAttributes(or.otelAttrs...)) @@ -153,6 +171,8 @@ func (or *obsReport) recordEnqueueFailure(ctx context.Context, dataType componen enqueueFailedMeasure = or.telemetryBuilder.ExporterEnqueueFailedMetricPoints case component.DataTypeLogs: enqueueFailedMeasure = or.telemetryBuilder.ExporterEnqueueFailedLogRecords + case componentprofiles.DataTypeProfiles: + enqueueFailedMeasure = or.telemetryBuilder.ExporterEnqueueFailedSamples } enqueueFailedMeasure.Add(ctx, failed, metric.WithAttributes(or.otelAttrs...)) diff --git a/exporter/exporterhelper/profiles.go b/exporter/exporterhelper/profiles.go new file mode 100644 index 00000000000..964d77bf51d --- /dev/null +++ b/exporter/exporterhelper/profiles.go @@ -0,0 +1,166 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package exporterhelper // import "go.opentelemetry.io/collector/exporter/exporterhelper" + +import ( + "context" + "errors" + + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componentprofiles" + "go.opentelemetry.io/collector/consumer/consumererror" + "go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles" + "go.opentelemetry.io/collector/consumer/consumerprofiles" + "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/exporter/exporterprofiles" + "go.opentelemetry.io/collector/exporter/exporterqueue" + "go.opentelemetry.io/collector/exporter/internal/queue" + "go.opentelemetry.io/collector/pdata/pprofile" +) + +var profilesMarshaler = &pprofile.ProtoMarshaler{} +var profilesUnmarshaler = &pprofile.ProtoUnmarshaler{} + +type profilesRequest struct { + pd pprofile.Profiles + pusher consumerprofiles.ConsumeProfilesFunc +} + +func newProfilesRequest(pd pprofile.Profiles, pusher consumerprofiles.ConsumeProfilesFunc) Request { + return &profilesRequest{ + pd: pd, + pusher: pusher, + } +} + +func newProfileRequestUnmarshalerFunc(pusher consumerprofiles.ConsumeProfilesFunc) exporterqueue.Unmarshaler[Request] { + return func(bytes []byte) (Request, error) { + profiles, err := profilesUnmarshaler.UnmarshalProfiles(bytes) + if err != nil { + return nil, err + } + return newProfilesRequest(profiles, pusher), nil + } +} + +func profilesRequestMarshaler(req Request) ([]byte, error) { + return profilesMarshaler.MarshalProfiles(req.(*profilesRequest).pd) +} + +func (req *profilesRequest) OnError(err error) Request { + var profileError consumererrorprofiles.Profiles + if errors.As(err, &profileError) { + return newProfilesRequest(profileError.Data(), req.pusher) + } + return req +} + +func (req *profilesRequest) Export(ctx context.Context) error { + return req.pusher(ctx, req.pd) +} + +func (req *profilesRequest) ItemsCount() int { + return req.pd.SampleCount() +} + +type profileExporter struct { + *baseExporter + consumerprofiles.Profiles +} + +// NewProfilesExporter creates an exporterprofiles.Profiless that records observability metrics and wraps every request with a Span. 
+func NewProfilesExporter( + ctx context.Context, + set exporter.Settings, + cfg component.Config, + pusher consumerprofiles.ConsumeProfilesFunc, + options ...Option, +) (exporterprofiles.Profiles, error) { + if cfg == nil { + return nil, errNilConfig + } + if pusher == nil { + return nil, errNilPushProfileData + } + profilesOpts := []Option{ + withMarshaler(profilesRequestMarshaler), withUnmarshaler(newProfileRequestUnmarshalerFunc(pusher)), + withBatchFuncs(mergeProfiles, mergeSplitProfiles), + } + return NewProfilesRequestExporter(ctx, set, requestFromProfiles(pusher), append(profilesOpts, options...)...) +} + +// RequestFromProfilesFunc converts pprofile.Profiles into a user-defined Request. +// Experimental: This API is at the early stage of development and may change without backward compatibility +// until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved. +type RequestFromProfilesFunc func(context.Context, pprofile.Profiles) (Request, error) + +// requestFromProfiles returns a RequestFromProfilesFunc that converts pprofile.Profiles into a Request. +func requestFromProfiles(pusher consumerprofiles.ConsumeProfilesFunc) RequestFromProfilesFunc { + return func(_ context.Context, profiles pprofile.Profiles) (Request, error) { + return newProfilesRequest(profiles, pusher), nil + } +} + +// NewProfilesRequestExporter creates a new profiles exporter based on a custom ProfilesConverter and RequestSender. +// Experimental: This API is at the early stage of development and may change without backward compatibility +// until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved. +func NewProfilesRequestExporter( + _ context.Context, + set exporter.Settings, + converter RequestFromProfilesFunc, + options ...Option, +) (exporterprofiles.Profiles, error) { + if set.Logger == nil { + return nil, errNilLogger + } + + if converter == nil { + return nil, errNilProfilesConverter + } + + be, err := newBaseExporter(set, componentprofiles.DataTypeProfiles, newProfilesExporterWithObservability, options...) + if err != nil { + return nil, err + } + + tc, err := consumerprofiles.NewProfiles(func(ctx context.Context, pd pprofile.Profiles) error { + req, cErr := converter(ctx, pd) + if cErr != nil { + set.Logger.Error("Failed to convert profiles. Dropping data.", + zap.Int("dropped_samples", pd.SampleCount()), + zap.Error(err)) + return consumererror.NewPermanent(cErr) + } + sErr := be.send(ctx, req) + if errors.Is(sErr, queue.ErrQueueIsFull) { + be.obsrep.recordEnqueueFailure(ctx, componentprofiles.DataTypeProfiles, int64(req.ItemsCount())) + } + return sErr + }, be.consumerOptions...) + + return &profileExporter{ + baseExporter: be, + Profiles: tc, + }, err +} + +type profilesExporterWithObservability struct { + baseRequestSender + obsrep *obsReport +} + +func newProfilesExporterWithObservability(obsrep *obsReport) requestSender { + return &profilesExporterWithObservability{obsrep: obsrep} +} + +func (tewo *profilesExporterWithObservability) send(ctx context.Context, req Request) error { + c := tewo.obsrep.startProfilesOp(ctx) + numSamples := req.ItemsCount() + // Forward the data to the next consumer (this pusher is the next). 
+ err := tewo.nextSender.send(c, req) + tewo.obsrep.endProfilesOp(c, numSamples, err) + return err +} diff --git a/exporter/exporterhelper/profiles_batch.go b/exporter/exporterhelper/profiles_batch.go new file mode 100644 index 00000000000..7c528295ab2 --- /dev/null +++ b/exporter/exporterhelper/profiles_batch.go @@ -0,0 +1,142 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package exporterhelper // import "go.opentelemetry.io/collector/exporter/exporterhelper" + +import ( + "context" + "errors" + + "go.opentelemetry.io/collector/exporter/exporterbatcher" + "go.opentelemetry.io/collector/pdata/pprofile" +) + +// mergeProfiles merges two profiles requests into one. +func mergeProfiles(_ context.Context, r1 Request, r2 Request) (Request, error) { + tr1, ok1 := r1.(*profilesRequest) + tr2, ok2 := r2.(*profilesRequest) + if !ok1 || !ok2 { + return nil, errors.New("invalid input type") + } + tr2.pd.ResourceProfiles().MoveAndAppendTo(tr1.pd.ResourceProfiles()) + return tr1, nil +} + +// mergeSplitProfiles splits and/or merges the profiles into multiple requests based on the MaxSizeConfig. +func mergeSplitProfiles(_ context.Context, cfg exporterbatcher.MaxSizeConfig, r1 Request, r2 Request) ([]Request, error) { + var ( + res []Request + destReq *profilesRequest + capacityLeft = cfg.MaxSizeItems + ) + for _, req := range []Request{r1, r2} { + if req == nil { + continue + } + srcReq, ok := req.(*profilesRequest) + if !ok { + return nil, errors.New("invalid input type") + } + if srcReq.pd.SampleCount() <= capacityLeft { + if destReq == nil { + destReq = srcReq + } else { + srcReq.pd.ResourceProfiles().MoveAndAppendTo(destReq.pd.ResourceProfiles()) + } + capacityLeft -= destReq.pd.SampleCount() + continue + } + + for { + extractedProfiles := extractProfiles(srcReq.pd, capacityLeft) + if extractedProfiles.SampleCount() == 0 { + break + } + capacityLeft -= extractedProfiles.SampleCount() + if destReq == nil { + destReq = &profilesRequest{pd: extractedProfiles, pusher: srcReq.pusher} + } else { + extractedProfiles.ResourceProfiles().MoveAndAppendTo(destReq.pd.ResourceProfiles()) + } + // Create new batch once capacity is reached. + if capacityLeft == 0 { + res = append(res, destReq) + destReq = nil + capacityLeft = cfg.MaxSizeItems + } + } + } + + if destReq != nil { + res = append(res, destReq) + } + return res, nil +} + +// extractProfiles extracts a new profiles with a maximum number of samples. +func extractProfiles(srcProfiles pprofile.Profiles, count int) pprofile.Profiles { + destProfiles := pprofile.NewProfiles() + srcProfiles.ResourceProfiles().RemoveIf(func(srcRS pprofile.ResourceProfiles) bool { + if count == 0 { + return false + } + needToExtract := samplesCount(srcRS) > count + if needToExtract { + srcRS = extractResourceProfiles(srcRS, count) + } + count -= samplesCount(srcRS) + srcRS.MoveTo(destProfiles.ResourceProfiles().AppendEmpty()) + return !needToExtract + }) + return destProfiles +} + +// extractResourceProfiles extracts profiles and returns a new resource profiles with the specified number of profiles. 
+func extractResourceProfiles(srcRS pprofile.ResourceProfiles, count int) pprofile.ResourceProfiles { + destRS := pprofile.NewResourceProfiles() + destRS.SetSchemaUrl(srcRS.SchemaUrl()) + srcRS.Resource().CopyTo(destRS.Resource()) + srcRS.ScopeProfiles().RemoveIf(func(srcSS pprofile.ScopeProfiles) bool { + if count == 0 { + return false + } + needToExtract := srcSS.Profiles().Len() > count + if needToExtract { + srcSS = extractScopeProfiles(srcSS, count) + } + count -= srcSS.Profiles().Len() + srcSS.MoveTo(destRS.ScopeProfiles().AppendEmpty()) + return !needToExtract + }) + srcRS.Resource().CopyTo(destRS.Resource()) + return destRS +} + +// extractScopeProfiles extracts profiles and returns a new scope profiles with the specified number of profiles. +func extractScopeProfiles(srcSS pprofile.ScopeProfiles, count int) pprofile.ScopeProfiles { + destSS := pprofile.NewScopeProfiles() + destSS.SetSchemaUrl(srcSS.SchemaUrl()) + srcSS.Scope().CopyTo(destSS.Scope()) + srcSS.Profiles().RemoveIf(func(srcProfile pprofile.ProfileContainer) bool { + if count == 0 { + return false + } + srcProfile.MoveTo(destSS.Profiles().AppendEmpty()) + count-- + return true + }) + return destSS +} + +// resourceProfilessCount calculates the total number of profiles in the pdata.ResourceProfiles. +func samplesCount(rs pprofile.ResourceProfiles) int { + count := 0 + rs.ScopeProfiles().RemoveIf(func(ss pprofile.ScopeProfiles) bool { + ss.Profiles().RemoveIf(func(sp pprofile.ProfileContainer) bool { + count += sp.Profile().Sample().Len() + return false + }) + return false + }) + return count +} diff --git a/exporter/exporterhelper/profiles_batch_test.go b/exporter/exporterhelper/profiles_batch_test.go new file mode 100644 index 00000000000..96e0d35e54b --- /dev/null +++ b/exporter/exporterhelper/profiles_batch_test.go @@ -0,0 +1,154 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package exporterhelper + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + + "go.opentelemetry.io/collector/exporter/exporterbatcher" + "go.opentelemetry.io/collector/pdata/pprofile" + "go.opentelemetry.io/collector/pdata/testdata" +) + +func TestMergeProfiles(t *testing.T) { + pr1 := &profilesRequest{pd: testdata.GenerateProfiles(2)} + pr2 := &profilesRequest{pd: testdata.GenerateProfiles(3)} + res, err := mergeProfiles(context.Background(), pr1, pr2) + assert.Nil(t, err) + assert.Equal(t, 5, res.(*profilesRequest).pd.SampleCount()) +} + +func TestMergeProfilesInvalidInput(t *testing.T) { + pr1 := &tracesRequest{td: testdata.GenerateTraces(2)} + pr2 := &profilesRequest{pd: testdata.GenerateProfiles(3)} + _, err := mergeProfiles(context.Background(), pr1, pr2) + assert.Error(t, err) +} + +func TestMergeSplitProfiles(t *testing.T) { + tests := []struct { + name string + cfg exporterbatcher.MaxSizeConfig + pr1 Request + pr2 Request + expected []*profilesRequest + }{ + { + name: "both_requests_empty", + cfg: exporterbatcher.MaxSizeConfig{MaxSizeItems: 10}, + pr1: &profilesRequest{pd: pprofile.NewProfiles()}, + pr2: &profilesRequest{pd: pprofile.NewProfiles()}, + expected: []*profilesRequest{{pd: pprofile.NewProfiles()}}, + }, + { + name: "both_requests_nil", + cfg: exporterbatcher.MaxSizeConfig{MaxSizeItems: 10}, + pr1: nil, + pr2: nil, + expected: []*profilesRequest{}, + }, + { + name: "first_request_empty", + cfg: exporterbatcher.MaxSizeConfig{MaxSizeItems: 10}, + pr1: &profilesRequest{pd: pprofile.NewProfiles()}, + pr2: &profilesRequest{pd: testdata.GenerateProfiles(5)}, + 
expected: []*profilesRequest{{pd: testdata.GenerateProfiles(5)}}, + }, + { + name: "first_requests_nil", + cfg: exporterbatcher.MaxSizeConfig{MaxSizeItems: 10}, + pr1: nil, + pr2: &profilesRequest{pd: testdata.GenerateProfiles(5)}, + expected: []*profilesRequest{{pd: testdata.GenerateProfiles(5)}}, + }, + { + name: "first_nil_second_empty", + cfg: exporterbatcher.MaxSizeConfig{MaxSizeItems: 10}, + pr1: nil, + pr2: &profilesRequest{pd: pprofile.NewProfiles()}, + expected: []*profilesRequest{{pd: pprofile.NewProfiles()}}, + }, + { + name: "merge_only", + cfg: exporterbatcher.MaxSizeConfig{MaxSizeItems: 10}, + pr1: &profilesRequest{pd: testdata.GenerateProfiles(4)}, + pr2: &profilesRequest{pd: testdata.GenerateProfiles(6)}, + expected: []*profilesRequest{{pd: func() pprofile.Profiles { + profiles := testdata.GenerateProfiles(4) + testdata.GenerateProfiles(6).ResourceProfiles().MoveAndAppendTo(profiles.ResourceProfiles()) + return profiles + }()}}, + }, + { + name: "split_only", + cfg: exporterbatcher.MaxSizeConfig{MaxSizeItems: 4}, + pr1: nil, + pr2: &profilesRequest{pd: testdata.GenerateProfiles(10)}, + expected: []*profilesRequest{ + {pd: testdata.GenerateProfiles(4)}, + {pd: testdata.GenerateProfiles(4)}, + {pd: testdata.GenerateProfiles(2)}, + }, + }, + { + name: "merge_and_split", + cfg: exporterbatcher.MaxSizeConfig{MaxSizeItems: 10}, + pr1: &profilesRequest{pd: testdata.GenerateProfiles(8)}, + pr2: &profilesRequest{pd: testdata.GenerateProfiles(20)}, + expected: []*profilesRequest{ + {pd: func() pprofile.Profiles { + profiles := testdata.GenerateProfiles(8) + testdata.GenerateProfiles(2).ResourceProfiles().MoveAndAppendTo(profiles.ResourceProfiles()) + return profiles + }()}, + {pd: testdata.GenerateProfiles(10)}, + {pd: testdata.GenerateProfiles(8)}, + }, + }, + { + name: "scope_profiles_split", + cfg: exporterbatcher.MaxSizeConfig{MaxSizeItems: 4}, + pr1: &profilesRequest{pd: func() pprofile.Profiles { + return testdata.GenerateProfiles(6) + }()}, + pr2: nil, + expected: []*profilesRequest{ + {pd: testdata.GenerateProfiles(4)}, + {pd: func() pprofile.Profiles { + return testdata.GenerateProfiles(2) + }()}, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + res, err := mergeSplitProfiles(context.Background(), tt.cfg, tt.pr1, tt.pr2) + assert.Nil(t, err) + assert.Equal(t, len(tt.expected), len(res)) + for i, r := range res { + assert.Equal(t, tt.expected[i], r.(*profilesRequest)) + } + }) + + } +} + +func TestMergeSplitProfilesInvalidInput(t *testing.T) { + r1 := &tracesRequest{td: testdata.GenerateTraces(2)} + r2 := &profilesRequest{pd: testdata.GenerateProfiles(3)} + _, err := mergeSplitProfiles(context.Background(), exporterbatcher.MaxSizeConfig{}, r1, r2) + assert.Error(t, err) +} + +func TestExtractProfiles(t *testing.T) { + for i := 0; i < 10; i++ { + ld := testdata.GenerateProfiles(10) + extractedProfiles := extractProfiles(ld, i) + assert.Equal(t, i, extractedProfiles.SampleCount()) + assert.Equal(t, 10-i, ld.SampleCount()) + } +} diff --git a/exporter/exporterhelper/profiles_test.go b/exporter/exporterhelper/profiles_test.go new file mode 100644 index 00000000000..c28f7227141 --- /dev/null +++ b/exporter/exporterhelper/profiles_test.go @@ -0,0 +1,441 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package exporterhelper + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.opentelemetry.io/otel" + 
"go.opentelemetry.io/otel/attribute" + sdktrace "go.opentelemetry.io/otel/sdk/trace" + "go.opentelemetry.io/otel/sdk/trace/tracetest" + "go.opentelemetry.io/otel/trace" + nooptrace "go.opentelemetry.io/otel/trace/noop" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/component/componenttest" + "go.opentelemetry.io/collector/config/configretry" + "go.opentelemetry.io/collector/consumer" + "go.opentelemetry.io/collector/consumer/consumererror" + "go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles" + "go.opentelemetry.io/collector/consumer/consumerprofiles" + "go.opentelemetry.io/collector/consumer/consumertest" + "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/exporter/exporterprofiles" + "go.opentelemetry.io/collector/exporter/exportertest" + "go.opentelemetry.io/collector/exporter/internal/queue" + "go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics" + "go.opentelemetry.io/collector/pdata/pprofile" + "go.opentelemetry.io/collector/pdata/testdata" +) + +const ( + fakeProfilesParentSpanName = "fake_profiles_parent_span_name" +) + +var ( + fakeProfilesExporterName = component.MustNewIDWithName("fake_profiles_exporter", "with_name") + fakeProfilesExporterConfig = struct{}{} +) + +func TestProfilesRequest(t *testing.T) { + lr := newProfilesRequest(testdata.GenerateProfiles(1), nil) + + profileErr := consumererrorprofiles.NewProfiles(errors.New("some error"), pprofile.NewProfiles()) + assert.EqualValues( + t, + newProfilesRequest(pprofile.NewProfiles(), nil), + lr.(RequestErrorHandler).OnError(profileErr), + ) +} + +func TestProfilesExporter_InvalidName(t *testing.T) { + le, err := NewProfilesExporter(context.Background(), exportertest.NewNopSettings(), nil, newPushProfilesData(nil)) + require.Nil(t, le) + require.Equal(t, errNilConfig, err) +} + +func TestProfilesExporter_NilLogger(t *testing.T) { + le, err := NewProfilesExporter(context.Background(), exporter.Settings{}, &fakeProfilesExporterConfig, newPushProfilesData(nil)) + require.Nil(t, le) + require.Equal(t, errNilLogger, err) +} + +func TestProfilesRequestExporter_NilLogger(t *testing.T) { + le, err := NewProfilesRequestExporter(context.Background(), exporter.Settings{}, (&fakeRequestConverter{}).requestFromProfilesFunc) + require.Nil(t, le) + require.Equal(t, errNilLogger, err) +} + +func TestProfilesExporter_NilPushProfilesData(t *testing.T) { + le, err := NewProfilesExporter(context.Background(), exportertest.NewNopSettings(), &fakeProfilesExporterConfig, nil) + require.Nil(t, le) + require.Equal(t, errNilPushProfileData, err) +} + +func TestProfilesRequestExporter_NilProfilesConverter(t *testing.T) { + le, err := NewProfilesRequestExporter(context.Background(), exportertest.NewNopSettings(), nil) + require.Nil(t, le) + require.Equal(t, errNilProfilesConverter, err) +} + +func TestProfilesExporter_Default(t *testing.T) { + ld := pprofile.NewProfiles() + le, err := NewProfilesExporter(context.Background(), exportertest.NewNopSettings(), &fakeProfilesExporterConfig, newPushProfilesData(nil)) + assert.NotNil(t, le) + assert.NoError(t, err) + + assert.Equal(t, consumer.Capabilities{MutatesData: false}, le.Capabilities()) + assert.NoError(t, le.Start(context.Background(), componenttest.NewNopHost())) + assert.NoError(t, le.ConsumeProfiles(context.Background(), ld)) + assert.NoError(t, le.Shutdown(context.Background())) +} + +func TestProfilesRequestExporter_Default(t *testing.T) { + ld := pprofile.NewProfiles() + le, err := 
NewProfilesRequestExporter(context.Background(), exportertest.NewNopSettings(), + (&fakeRequestConverter{}).requestFromProfilesFunc) + assert.NotNil(t, le) + assert.NoError(t, err) + + assert.Equal(t, consumer.Capabilities{MutatesData: false}, le.Capabilities()) + assert.NoError(t, le.Start(context.Background(), componenttest.NewNopHost())) + assert.NoError(t, le.ConsumeProfiles(context.Background(), ld)) + assert.NoError(t, le.Shutdown(context.Background())) +} + +func TestProfilesExporter_WithCapabilities(t *testing.T) { + capabilities := consumer.Capabilities{MutatesData: true} + le, err := NewProfilesExporter(context.Background(), exportertest.NewNopSettings(), &fakeProfilesExporterConfig, newPushProfilesData(nil), WithCapabilities(capabilities)) + require.NoError(t, err) + require.NotNil(t, le) + + assert.Equal(t, capabilities, le.Capabilities()) +} + +func TestProfilesRequestExporter_WithCapabilities(t *testing.T) { + capabilities := consumer.Capabilities{MutatesData: true} + le, err := NewProfilesRequestExporter(context.Background(), exportertest.NewNopSettings(), + (&fakeRequestConverter{}).requestFromProfilesFunc, WithCapabilities(capabilities)) + require.NoError(t, err) + require.NotNil(t, le) + + assert.Equal(t, capabilities, le.Capabilities()) +} + +func TestProfilesExporter_Default_ReturnError(t *testing.T) { + ld := pprofile.NewProfiles() + want := errors.New("my_error") + le, err := NewProfilesExporter(context.Background(), exportertest.NewNopSettings(), &fakeProfilesExporterConfig, newPushProfilesData(want)) + require.NoError(t, err) + require.NotNil(t, le) + require.Equal(t, want, le.ConsumeProfiles(context.Background(), ld)) +} + +func TestProfilesRequestExporter_Default_ConvertError(t *testing.T) { + ld := pprofile.NewProfiles() + want := errors.New("convert_error") + le, err := NewProfilesRequestExporter(context.Background(), exportertest.NewNopSettings(), + (&fakeRequestConverter{profilesError: want}).requestFromProfilesFunc) + require.NoError(t, err) + require.NotNil(t, le) + require.Equal(t, consumererror.NewPermanent(want), le.ConsumeProfiles(context.Background(), ld)) +} + +func TestProfilesRequestExporter_Default_ExportError(t *testing.T) { + ld := pprofile.NewProfiles() + want := errors.New("export_error") + le, err := NewProfilesRequestExporter(context.Background(), exportertest.NewNopSettings(), + (&fakeRequestConverter{requestError: want}).requestFromProfilesFunc) + require.NoError(t, err) + require.NotNil(t, le) + require.Equal(t, want, le.ConsumeProfiles(context.Background(), ld)) +} + +func TestProfilesExporter_WithPersistentQueue(t *testing.T) { + qCfg := NewDefaultQueueSettings() + storageID := component.MustNewIDWithName("file_storage", "storage") + qCfg.StorageID = &storageID + rCfg := configretry.NewDefaultBackOffConfig() + ts := consumertest.ProfilesSink{} + set := exportertest.NewNopSettings() + set.ID = component.MustNewIDWithName("test_profiles", "with_persistent_queue") + te, err := NewProfilesExporter(context.Background(), set, &fakeProfilesExporterConfig, ts.ConsumeProfiles, WithRetry(rCfg), WithQueue(qCfg)) + require.NoError(t, err) + + host := &mockHost{ext: map[component.ID]component.Component{ + storageID: queue.NewMockStorageExtension(nil), + }} + require.NoError(t, te.Start(context.Background(), host)) + t.Cleanup(func() { require.NoError(t, te.Shutdown(context.Background())) }) + + traces := testdata.GenerateProfiles(2) + require.NoError(t, te.ConsumeProfiles(context.Background(), traces)) + require.Eventually(t, func() bool { + return 
len(ts.AllProfiles()) == 1 && ts.SampleCount() == 2 + }, 500*time.Millisecond, 10*time.Millisecond) +} + +func TestProfilesExporter_WithRecordMetrics(t *testing.T) { + tt, err := componenttest.SetupTelemetry(fakeProfilesExporterName) + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, tt.Shutdown(context.Background())) }) + + le, err := NewProfilesExporter(context.Background(), exporter.Settings{ID: fakeProfilesExporterName, TelemetrySettings: tt.TelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo()}, &fakeProfilesExporterConfig, newPushProfilesData(nil)) + require.NoError(t, err) + require.NotNil(t, le) + + checkRecordedMetricsForProfilesExporter(t, tt, le, nil) +} + +func TestProfilesExporter_pProfileModifiedDownStream_WithRecordMetrics(t *testing.T) { + tt, err := componenttest.SetupTelemetry(fakeProfilesExporterName) + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, tt.Shutdown(context.Background())) }) + + le, err := NewProfilesExporter(context.Background(), exporter.Settings{ID: fakeProfilesExporterName, TelemetrySettings: tt.TelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo()}, &fakeProfilesExporterConfig, newPushProfilesDataModifiedDownstream(nil), WithCapabilities(consumer.Capabilities{MutatesData: true})) + assert.NotNil(t, le) + assert.NoError(t, err) + ld := testdata.GenerateProfiles(2) + + assert.NoError(t, le.ConsumeProfiles(context.Background(), ld)) + assert.Equal(t, 0, ld.SampleCount()) + require.NoError(t, tt.CheckExporterProfiles(int64(2), 0)) +} + +func TestProfilesRequestExporter_WithRecordMetrics(t *testing.T) { + tt, err := componenttest.SetupTelemetry(fakeProfilesExporterName) + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, tt.Shutdown(context.Background())) }) + + le, err := NewProfilesRequestExporter(context.Background(), + exporter.Settings{ID: fakeProfilesExporterName, TelemetrySettings: tt.TelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo()}, + (&fakeRequestConverter{}).requestFromProfilesFunc) + require.NoError(t, err) + require.NotNil(t, le) + + checkRecordedMetricsForProfilesExporter(t, tt, le, nil) +} + +func TestProfilesExporter_WithRecordMetrics_ReturnError(t *testing.T) { + want := errors.New("my_error") + tt, err := componenttest.SetupTelemetry(fakeProfilesExporterName) + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, tt.Shutdown(context.Background())) }) + + le, err := NewProfilesExporter(context.Background(), exporter.Settings{ID: fakeProfilesExporterName, TelemetrySettings: tt.TelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo()}, &fakeProfilesExporterConfig, newPushProfilesData(want)) + require.Nil(t, err) + require.NotNil(t, le) + + checkRecordedMetricsForProfilesExporter(t, tt, le, want) +} + +func TestProfilesRequestExporter_WithRecordMetrics_ExportError(t *testing.T) { + want := errors.New("export_error") + tt, err := componenttest.SetupTelemetry(fakeProfilesExporterName) + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, tt.Shutdown(context.Background())) }) + + le, err := NewProfilesRequestExporter(context.Background(), exporter.Settings{ID: fakeProfilesExporterName, TelemetrySettings: tt.TelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo()}, + (&fakeRequestConverter{requestError: want}).requestFromProfilesFunc) + require.Nil(t, err) + require.NotNil(t, le) + + checkRecordedMetricsForProfilesExporter(t, tt, le, want) +} + +func TestProfilesExporter_WithRecordEnqueueFailedMetrics(t *testing.T) { + tt, err := 
componenttest.SetupTelemetry(fakeProfilesExporterName) + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, tt.Shutdown(context.Background())) }) + + rCfg := configretry.NewDefaultBackOffConfig() + qCfg := NewDefaultQueueSettings() + qCfg.NumConsumers = 1 + qCfg.QueueSize = 2 + wantErr := errors.New("some-error") + te, err := NewProfilesExporter(context.Background(), exporter.Settings{ID: fakeProfilesExporterName, TelemetrySettings: tt.TelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo()}, &fakeProfilesExporterConfig, newPushProfilesData(wantErr), WithRetry(rCfg), WithQueue(qCfg)) + require.NoError(t, err) + require.NotNil(t, te) + + md := testdata.GenerateProfiles(3) + const numBatches = 7 + for i := 0; i < numBatches; i++ { + // errors are checked in the checkExporterEnqueueFailedProfilesStats function below. + _ = te.ConsumeProfiles(context.Background(), md) + } + + // 2 batched must be in queue, and 5 batches (15 profile records) rejected due to queue overflow + require.NoError(t, tt.CheckExporterEnqueueFailedProfiles(int64(15))) +} + +func TestProfilesExporter_WithSpan(t *testing.T) { + set := exportertest.NewNopSettings() + sr := new(tracetest.SpanRecorder) + set.TracerProvider = sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(sr)) + otel.SetTracerProvider(set.TracerProvider) + defer otel.SetTracerProvider(nooptrace.NewTracerProvider()) + + le, err := NewProfilesExporter(context.Background(), set, &fakeProfilesExporterConfig, newPushProfilesData(nil)) + require.Nil(t, err) + require.NotNil(t, le) + checkWrapSpanForProfilesExporter(t, sr, set.TracerProvider.Tracer("test"), le, nil, 1) +} + +func TestProfilesRequestExporter_WithSpan(t *testing.T) { + set := exportertest.NewNopSettings() + sr := new(tracetest.SpanRecorder) + set.TracerProvider = sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(sr)) + otel.SetTracerProvider(set.TracerProvider) + defer otel.SetTracerProvider(nooptrace.NewTracerProvider()) + + le, err := NewProfilesRequestExporter(context.Background(), set, (&fakeRequestConverter{}).requestFromProfilesFunc) + require.Nil(t, err) + require.NotNil(t, le) + checkWrapSpanForProfilesExporter(t, sr, set.TracerProvider.Tracer("test"), le, nil, 1) +} + +func TestProfilesExporter_WithSpan_ReturnError(t *testing.T) { + set := exportertest.NewNopSettings() + sr := new(tracetest.SpanRecorder) + set.TracerProvider = sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(sr)) + otel.SetTracerProvider(set.TracerProvider) + defer otel.SetTracerProvider(nooptrace.NewTracerProvider()) + + want := errors.New("my_error") + le, err := NewProfilesExporter(context.Background(), set, &fakeProfilesExporterConfig, newPushProfilesData(want)) + require.Nil(t, err) + require.NotNil(t, le) + checkWrapSpanForProfilesExporter(t, sr, set.TracerProvider.Tracer("test"), le, want, 1) +} + +func TestProfilesRequestExporter_WithSpan_ReturnError(t *testing.T) { + set := exportertest.NewNopSettings() + sr := new(tracetest.SpanRecorder) + set.TracerProvider = sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(sr)) + otel.SetTracerProvider(set.TracerProvider) + defer otel.SetTracerProvider(nooptrace.NewTracerProvider()) + + want := errors.New("my_error") + le, err := NewProfilesRequestExporter(context.Background(), set, (&fakeRequestConverter{requestError: want}).requestFromProfilesFunc) + require.Nil(t, err) + require.NotNil(t, le) + checkWrapSpanForProfilesExporter(t, sr, set.TracerProvider.Tracer("test"), le, want, 1) +} + +func TestProfilesExporter_WithShutdown(t 
*testing.T) { + shutdownCalled := false + shutdown := func(context.Context) error { shutdownCalled = true; return nil } + + le, err := NewProfilesExporter(context.Background(), exportertest.NewNopSettings(), &fakeProfilesExporterConfig, newPushProfilesData(nil), WithShutdown(shutdown)) + assert.NotNil(t, le) + assert.NoError(t, err) + + assert.Nil(t, le.Shutdown(context.Background())) + assert.True(t, shutdownCalled) +} + +func TestProfilesRequestExporter_WithShutdown(t *testing.T) { + shutdownCalled := false + shutdown := func(context.Context) error { shutdownCalled = true; return nil } + + le, err := NewProfilesRequestExporter(context.Background(), exportertest.NewNopSettings(), + (&fakeRequestConverter{}).requestFromProfilesFunc, WithShutdown(shutdown)) + assert.NotNil(t, le) + assert.NoError(t, err) + + assert.Nil(t, le.Shutdown(context.Background())) + assert.True(t, shutdownCalled) +} + +func TestProfilesExporter_WithShutdown_ReturnError(t *testing.T) { + want := errors.New("my_error") + shutdownErr := func(context.Context) error { return want } + + le, err := NewProfilesExporter(context.Background(), exportertest.NewNopSettings(), &fakeProfilesExporterConfig, newPushProfilesData(nil), WithShutdown(shutdownErr)) + assert.NotNil(t, le) + assert.NoError(t, err) + + assert.Equal(t, le.Shutdown(context.Background()), want) +} + +func TestProfilesRequestExporter_WithShutdown_ReturnError(t *testing.T) { + want := errors.New("my_error") + shutdownErr := func(context.Context) error { return want } + + le, err := NewProfilesRequestExporter(context.Background(), exportertest.NewNopSettings(), + (&fakeRequestConverter{}).requestFromProfilesFunc, WithShutdown(shutdownErr)) + assert.NotNil(t, le) + assert.NoError(t, err) + + assert.Equal(t, le.Shutdown(context.Background()), want) +} + +func newPushProfilesDataModifiedDownstream(retError error) consumerprofiles.ConsumeProfilesFunc { + return func(_ context.Context, profile pprofile.Profiles) error { + profile.ResourceProfiles().MoveAndAppendTo(pprofile.NewResourceProfilesSlice()) + return retError + } +} + +func newPushProfilesData(retError error) consumerprofiles.ConsumeProfilesFunc { + return func(_ context.Context, _ pprofile.Profiles) error { + return retError + } +} + +func checkRecordedMetricsForProfilesExporter(t *testing.T, tt componenttest.TestTelemetry, le exporterprofiles.Profiles, wantError error) { + ld := testdata.GenerateProfiles(2) + const numBatches = 7 + for i := 0; i < numBatches; i++ { + require.Equal(t, wantError, le.ConsumeProfiles(context.Background(), ld)) + } + + // TODO: When the new metrics correctly count partial dropped fix this. + if wantError != nil { + require.NoError(t, tt.CheckExporterProfiles(0, int64(numBatches*ld.SampleCount()))) + } else { + require.NoError(t, tt.CheckExporterProfiles(int64(numBatches*ld.SampleCount()), 0)) + } +} + +func generateProfilesTraffic(t *testing.T, tracer trace.Tracer, le exporterprofiles.Profiles, numRequests int, wantError error) { + ld := testdata.GenerateProfiles(1) + ctx, span := tracer.Start(context.Background(), fakeProfilesParentSpanName) + defer span.End() + for i := 0; i < numRequests; i++ { + require.Equal(t, wantError, le.ConsumeProfiles(ctx, ld)) + } +} + +func checkWrapSpanForProfilesExporter(t *testing.T, sr *tracetest.SpanRecorder, tracer trace.Tracer, le exporterprofiles.Profiles, + wantError error, numProfileRecords int64) { // nolint: unparam + const numRequests = 5 + generateProfilesTraffic(t, tracer, le, numRequests, wantError) + + // Inspection time! 
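+ // The recorder should now contain one exporter span per request plus the
+ // parent span started in generateProfilesTraffic; each child span is
+ // expected to carry the sent/failed sample-count attributes checked below.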
+ gotSpanData := sr.Ended() + require.Equal(t, numRequests+1, len(gotSpanData)) + + parentSpan := gotSpanData[numRequests] + require.Equalf(t, fakeProfilesParentSpanName, parentSpan.Name(), "SpanData %v", parentSpan) + for _, sd := range gotSpanData[:numRequests] { + require.Equalf(t, parentSpan.SpanContext(), sd.Parent(), "Exporter span not a child\nSpanData %v", sd) + checkStatus(t, sd, wantError) + + sentProfileRecords := numProfileRecords + var failedToSendProfileRecords int64 + if wantError != nil { + sentProfileRecords = 0 + failedToSendProfileRecords = numProfileRecords + } + require.Containsf(t, sd.Attributes(), attribute.KeyValue{Key: obsmetrics.SentSamplesKey, Value: attribute.Int64Value(sentProfileRecords)}, "SpanData %v", sd) + require.Containsf(t, sd.Attributes(), attribute.KeyValue{Key: obsmetrics.FailedToSendSamplesKey, Value: attribute.Int64Value(failedToSendProfileRecords)}, "SpanData %v", sd) + } +} diff --git a/exporter/exporterhelper/request_test.go b/exporter/exporterhelper/request_test.go index fe373c67e12..5f48b2b674f 100644 --- a/exporter/exporterhelper/request_test.go +++ b/exporter/exporterhelper/request_test.go @@ -11,6 +11,7 @@ import ( "go.opentelemetry.io/collector/exporter/exporterbatcher" "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/pdata/pmetric" + "go.opentelemetry.io/collector/pdata/pprofile" "go.opentelemetry.io/collector/pdata/ptrace" ) @@ -117,10 +118,11 @@ func fakeBatchMergeSplitFunc(ctx context.Context, cfg exporterbatcher.MaxSizeCon } type fakeRequestConverter struct { - metricsError error - tracesError error - logsError error - requestError error + metricsError error + tracesError error + logsError error + profilesError error + requestError error } func (frc *fakeRequestConverter) requestFromMetricsFunc(_ context.Context, md pmetric.Metrics) (Request, error) { @@ -134,3 +136,7 @@ func (frc *fakeRequestConverter) requestFromTracesFunc(_ context.Context, md ptr func (frc *fakeRequestConverter) requestFromLogsFunc(_ context.Context, md plog.Logs) (Request, error) { return &fakeRequest{items: md.LogRecordCount(), exportErr: frc.requestError}, frc.logsError } + +func (frc *fakeRequestConverter) requestFromProfilesFunc(_ context.Context, md pprofile.Profiles) (Request, error) { + return &fakeRequest{items: md.SampleCount(), exportErr: frc.requestError}, frc.profilesError +} From cb0c69e3ff21ba36e740de53fab7ffb4432fe3f6 Mon Sep 17 00:00:00 2001 From: dmathieu <42@dmathieu.com> Date: Wed, 11 Sep 2024 11:02:28 +0200 Subject: [PATCH 08/18] remove metrics from profiles --- exporter/exporterhelper/documentation.md | 24 ---- .../internal/metadata/generated_telemetry.go | 21 ---- exporter/exporterhelper/metadata.yaml | 24 ---- exporter/exporterhelper/obsexporter.go | 7 -- exporter/exporterhelper/profiles.go | 7 +- exporter/exporterhelper/profiles_test.go | 108 ------------------ 6 files changed, 1 insertion(+), 190 deletions(-) diff --git a/exporter/exporterhelper/documentation.md b/exporter/exporterhelper/documentation.md index 6a5bc55db24..a82163a2bfa 100644 --- a/exporter/exporterhelper/documentation.md +++ b/exporter/exporterhelper/documentation.md @@ -22,14 +22,6 @@ Number of metric points failed to be added to the sending queue. | ---- | ----------- | ---------- | --------- | | {datapoints} | Sum | Int | true | -### otelcol_exporter_enqueue_failed_samples - -Number of samples failed to be added to the sending queue. 
- -| Unit | Metric Type | Value Type | Monotonic | -| ---- | ----------- | ---------- | --------- | -| {samples} | Sum | Int | true | - ### otelcol_exporter_enqueue_failed_spans Number of spans failed to be added to the sending queue. @@ -70,14 +62,6 @@ Number of metric points in failed attempts to send to destination. | ---- | ----------- | ---------- | --------- | | {datapoints} | Sum | Int | true | -### otelcol_exporter_send_failed_samples - -Number of samples in failed attempts to send to destination. - -| Unit | Metric Type | Value Type | Monotonic | -| ---- | ----------- | ---------- | --------- | -| {samples} | Sum | Int | true | - ### otelcol_exporter_send_failed_spans Number of spans in failed attempts to send to destination. @@ -102,14 +86,6 @@ Number of metric points successfully sent to destination. | ---- | ----------- | ---------- | --------- | | {datapoints} | Sum | Int | true | -### otelcol_exporter_sent_samples - -Number of samples successfully sent to destination. - -| Unit | Metric Type | Value Type | Monotonic | -| ---- | ----------- | ---------- | --------- | -| {samples} | Sum | Int | true | - ### otelcol_exporter_sent_spans Number of spans successfully sent to destination. diff --git a/exporter/exporterhelper/internal/metadata/generated_telemetry.go b/exporter/exporterhelper/internal/metadata/generated_telemetry.go index 4ae52d8f1fa..a35625ac0a5 100644 --- a/exporter/exporterhelper/internal/metadata/generated_telemetry.go +++ b/exporter/exporterhelper/internal/metadata/generated_telemetry.go @@ -32,17 +32,14 @@ type TelemetryBuilder struct { meter metric.Meter ExporterEnqueueFailedLogRecords metric.Int64Counter ExporterEnqueueFailedMetricPoints metric.Int64Counter - ExporterEnqueueFailedSamples metric.Int64Counter ExporterEnqueueFailedSpans metric.Int64Counter ExporterQueueCapacity metric.Int64ObservableGauge ExporterQueueSize metric.Int64ObservableGauge ExporterSendFailedLogRecords metric.Int64Counter ExporterSendFailedMetricPoints metric.Int64Counter - ExporterSendFailedSamples metric.Int64Counter ExporterSendFailedSpans metric.Int64Counter ExporterSentLogRecords metric.Int64Counter ExporterSentMetricPoints metric.Int64Counter - ExporterSentSamples metric.Int64Counter ExporterSentSpans metric.Int64Counter meters map[configtelemetry.Level]metric.Meter } @@ -107,12 +104,6 @@ func NewTelemetryBuilder(settings component.TelemetrySettings, options ...teleme metric.WithUnit("{datapoints}"), ) errs = errors.Join(errs, err) - builder.ExporterEnqueueFailedSamples, err = builder.meters[configtelemetry.LevelBasic].Int64Counter( - "otelcol_exporter_enqueue_failed_samples", - metric.WithDescription("Number of samples failed to be added to the sending queue."), - metric.WithUnit("{samples}"), - ) - errs = errors.Join(errs, err) builder.ExporterEnqueueFailedSpans, err = builder.meters[configtelemetry.LevelBasic].Int64Counter( "otelcol_exporter_enqueue_failed_spans", metric.WithDescription("Number of spans failed to be added to the sending queue."), @@ -131,12 +122,6 @@ func NewTelemetryBuilder(settings component.TelemetrySettings, options ...teleme metric.WithUnit("{datapoints}"), ) errs = errors.Join(errs, err) - builder.ExporterSendFailedSamples, err = builder.meters[configtelemetry.LevelBasic].Int64Counter( - "otelcol_exporter_send_failed_samples", - metric.WithDescription("Number of samples in failed attempts to send to destination."), - metric.WithUnit("{samples}"), - ) - errs = errors.Join(errs, err) builder.ExporterSendFailedSpans, err = 
builder.meters[configtelemetry.LevelBasic].Int64Counter( "otelcol_exporter_send_failed_spans", metric.WithDescription("Number of spans in failed attempts to send to destination."), @@ -155,12 +140,6 @@ func NewTelemetryBuilder(settings component.TelemetrySettings, options ...teleme metric.WithUnit("{datapoints}"), ) errs = errors.Join(errs, err) - builder.ExporterSentSamples, err = builder.meters[configtelemetry.LevelBasic].Int64Counter( - "otelcol_exporter_sent_samples", - metric.WithDescription("Number of samples successfully sent to destination."), - metric.WithUnit("{samples}"), - ) - errs = errors.Join(errs, err) builder.ExporterSentSpans, err = builder.meters[configtelemetry.LevelBasic].Int64Counter( "otelcol_exporter_sent_spans", metric.WithDescription("Number of spans successfully sent to destination."), diff --git a/exporter/exporterhelper/metadata.yaml b/exporter/exporterhelper/metadata.yaml index 770b3a431f5..5dad11fb99b 100644 --- a/exporter/exporterhelper/metadata.yaml +++ b/exporter/exporterhelper/metadata.yaml @@ -82,30 +82,6 @@ telemetry: value_type: int monotonic: true - exporter_sent_samples: - enabled: true - description: Number of samples successfully sent to destination. - unit: "{samples}" - sum: - value_type: int - monotonic: true - - exporter_send_failed_samples: - enabled: true - description: Number of samples in failed attempts to send to destination. - unit: "{samples}" - sum: - value_type: int - monotonic: true - - exporter_enqueue_failed_samples: - enabled: true - description: Number of samples failed to be added to the sending queue. - unit: "{samples}" - sum: - value_type: int - monotonic: true - exporter_queue_size: enabled: true description: Current size of the retry queue (in batches) diff --git a/exporter/exporterhelper/obsexporter.go b/exporter/exporterhelper/obsexporter.go index 05de9fb96f8..8fbac386bf2 100644 --- a/exporter/exporterhelper/obsexporter.go +++ b/exporter/exporterhelper/obsexporter.go @@ -12,7 +12,6 @@ import ( "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/component/componentprofiles" "go.opentelemetry.io/collector/exporter" "go.opentelemetry.io/collector/exporter/exporterhelper/internal/metadata" "go.opentelemetry.io/collector/internal/obsreportconfig/obsmetrics" @@ -107,7 +106,6 @@ func (or *obsReport) startProfilesOp(ctx context.Context) context.Context { // endProfilesOp completes the export operation that was started with startProfilesOp. 
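+// Note: with the profile-specific counters removed, this only annotates the
+// exporter span with sent/failed sample counts; no metrics are recorded for profiles.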
func (or *obsReport) endProfilesOp(ctx context.Context, numSpans int, err error) { numSent, numFailedToSend := toNumItems(numSpans, err) - or.recordMetrics(context.WithoutCancel(ctx), componentprofiles.DataTypeProfiles, numSent, numFailedToSend) endSpan(ctx, err, numSent, numFailedToSend, obsmetrics.SentSamplesKey, obsmetrics.FailedToSendSamplesKey) } @@ -131,9 +129,6 @@ func (or *obsReport) recordMetrics(ctx context.Context, dataType component.DataT case component.DataTypeLogs: sentMeasure = or.telemetryBuilder.ExporterSentLogRecords failedMeasure = or.telemetryBuilder.ExporterSendFailedLogRecords - case componentprofiles.DataTypeProfiles: - sentMeasure = or.telemetryBuilder.ExporterSentSamples - failedMeasure = or.telemetryBuilder.ExporterSendFailedSamples } sentMeasure.Add(ctx, sent, metric.WithAttributes(or.otelAttrs...)) @@ -171,8 +166,6 @@ func (or *obsReport) recordEnqueueFailure(ctx context.Context, dataType componen enqueueFailedMeasure = or.telemetryBuilder.ExporterEnqueueFailedMetricPoints case component.DataTypeLogs: enqueueFailedMeasure = or.telemetryBuilder.ExporterEnqueueFailedLogRecords - case componentprofiles.DataTypeProfiles: - enqueueFailedMeasure = or.telemetryBuilder.ExporterEnqueueFailedSamples } enqueueFailedMeasure.Add(ctx, failed, metric.WithAttributes(or.otelAttrs...)) diff --git a/exporter/exporterhelper/profiles.go b/exporter/exporterhelper/profiles.go index 964d77bf51d..0c079a64a72 100644 --- a/exporter/exporterhelper/profiles.go +++ b/exporter/exporterhelper/profiles.go @@ -17,7 +17,6 @@ import ( "go.opentelemetry.io/collector/exporter" "go.opentelemetry.io/collector/exporter/exporterprofiles" "go.opentelemetry.io/collector/exporter/exporterqueue" - "go.opentelemetry.io/collector/exporter/internal/queue" "go.opentelemetry.io/collector/pdata/pprofile" ) @@ -134,11 +133,7 @@ func NewProfilesRequestExporter( zap.Error(err)) return consumererror.NewPermanent(cErr) } - sErr := be.send(ctx, req) - if errors.Is(sErr, queue.ErrQueueIsFull) { - be.obsrep.recordEnqueueFailure(ctx, componentprofiles.DataTypeProfiles, int64(req.ItemsCount())) - } - return sErr + return be.send(ctx, req) }, be.consumerOptions...) 
return &profileExporter{ diff --git a/exporter/exporterhelper/profiles_test.go b/exporter/exporterhelper/profiles_test.go index c28f7227141..19665a23c01 100644 --- a/exporter/exporterhelper/profiles_test.go +++ b/exporter/exporterhelper/profiles_test.go @@ -182,99 +182,6 @@ func TestProfilesExporter_WithPersistentQueue(t *testing.T) { }, 500*time.Millisecond, 10*time.Millisecond) } -func TestProfilesExporter_WithRecordMetrics(t *testing.T) { - tt, err := componenttest.SetupTelemetry(fakeProfilesExporterName) - require.NoError(t, err) - t.Cleanup(func() { require.NoError(t, tt.Shutdown(context.Background())) }) - - le, err := NewProfilesExporter(context.Background(), exporter.Settings{ID: fakeProfilesExporterName, TelemetrySettings: tt.TelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo()}, &fakeProfilesExporterConfig, newPushProfilesData(nil)) - require.NoError(t, err) - require.NotNil(t, le) - - checkRecordedMetricsForProfilesExporter(t, tt, le, nil) -} - -func TestProfilesExporter_pProfileModifiedDownStream_WithRecordMetrics(t *testing.T) { - tt, err := componenttest.SetupTelemetry(fakeProfilesExporterName) - require.NoError(t, err) - t.Cleanup(func() { require.NoError(t, tt.Shutdown(context.Background())) }) - - le, err := NewProfilesExporter(context.Background(), exporter.Settings{ID: fakeProfilesExporterName, TelemetrySettings: tt.TelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo()}, &fakeProfilesExporterConfig, newPushProfilesDataModifiedDownstream(nil), WithCapabilities(consumer.Capabilities{MutatesData: true})) - assert.NotNil(t, le) - assert.NoError(t, err) - ld := testdata.GenerateProfiles(2) - - assert.NoError(t, le.ConsumeProfiles(context.Background(), ld)) - assert.Equal(t, 0, ld.SampleCount()) - require.NoError(t, tt.CheckExporterProfiles(int64(2), 0)) -} - -func TestProfilesRequestExporter_WithRecordMetrics(t *testing.T) { - tt, err := componenttest.SetupTelemetry(fakeProfilesExporterName) - require.NoError(t, err) - t.Cleanup(func() { require.NoError(t, tt.Shutdown(context.Background())) }) - - le, err := NewProfilesRequestExporter(context.Background(), - exporter.Settings{ID: fakeProfilesExporterName, TelemetrySettings: tt.TelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo()}, - (&fakeRequestConverter{}).requestFromProfilesFunc) - require.NoError(t, err) - require.NotNil(t, le) - - checkRecordedMetricsForProfilesExporter(t, tt, le, nil) -} - -func TestProfilesExporter_WithRecordMetrics_ReturnError(t *testing.T) { - want := errors.New("my_error") - tt, err := componenttest.SetupTelemetry(fakeProfilesExporterName) - require.NoError(t, err) - t.Cleanup(func() { require.NoError(t, tt.Shutdown(context.Background())) }) - - le, err := NewProfilesExporter(context.Background(), exporter.Settings{ID: fakeProfilesExporterName, TelemetrySettings: tt.TelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo()}, &fakeProfilesExporterConfig, newPushProfilesData(want)) - require.Nil(t, err) - require.NotNil(t, le) - - checkRecordedMetricsForProfilesExporter(t, tt, le, want) -} - -func TestProfilesRequestExporter_WithRecordMetrics_ExportError(t *testing.T) { - want := errors.New("export_error") - tt, err := componenttest.SetupTelemetry(fakeProfilesExporterName) - require.NoError(t, err) - t.Cleanup(func() { require.NoError(t, tt.Shutdown(context.Background())) }) - - le, err := NewProfilesRequestExporter(context.Background(), exporter.Settings{ID: fakeProfilesExporterName, TelemetrySettings: tt.TelemetrySettings(), BuildInfo: 
component.NewDefaultBuildInfo()}, - (&fakeRequestConverter{requestError: want}).requestFromProfilesFunc) - require.Nil(t, err) - require.NotNil(t, le) - - checkRecordedMetricsForProfilesExporter(t, tt, le, want) -} - -func TestProfilesExporter_WithRecordEnqueueFailedMetrics(t *testing.T) { - tt, err := componenttest.SetupTelemetry(fakeProfilesExporterName) - require.NoError(t, err) - t.Cleanup(func() { require.NoError(t, tt.Shutdown(context.Background())) }) - - rCfg := configretry.NewDefaultBackOffConfig() - qCfg := NewDefaultQueueSettings() - qCfg.NumConsumers = 1 - qCfg.QueueSize = 2 - wantErr := errors.New("some-error") - te, err := NewProfilesExporter(context.Background(), exporter.Settings{ID: fakeProfilesExporterName, TelemetrySettings: tt.TelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo()}, &fakeProfilesExporterConfig, newPushProfilesData(wantErr), WithRetry(rCfg), WithQueue(qCfg)) - require.NoError(t, err) - require.NotNil(t, te) - - md := testdata.GenerateProfiles(3) - const numBatches = 7 - for i := 0; i < numBatches; i++ { - // errors are checked in the checkExporterEnqueueFailedProfilesStats function below. - _ = te.ConsumeProfiles(context.Background(), md) - } - - // 2 batched must be in queue, and 5 batches (15 profile records) rejected due to queue overflow - require.NoError(t, tt.CheckExporterEnqueueFailedProfiles(int64(15))) -} - func TestProfilesExporter_WithSpan(t *testing.T) { set := exportertest.NewNopSettings() sr := new(tracetest.SpanRecorder) @@ -390,21 +297,6 @@ func newPushProfilesData(retError error) consumerprofiles.ConsumeProfilesFunc { } } -func checkRecordedMetricsForProfilesExporter(t *testing.T, tt componenttest.TestTelemetry, le exporterprofiles.Profiles, wantError error) { - ld := testdata.GenerateProfiles(2) - const numBatches = 7 - for i := 0; i < numBatches; i++ { - require.Equal(t, wantError, le.ConsumeProfiles(context.Background(), ld)) - } - - // TODO: When the new metrics correctly count partial dropped fix this. 
- if wantError != nil { - require.NoError(t, tt.CheckExporterProfiles(0, int64(numBatches*ld.SampleCount()))) - } else { - require.NoError(t, tt.CheckExporterProfiles(int64(numBatches*ld.SampleCount()), 0)) - } -} - func generateProfilesTraffic(t *testing.T, tracer trace.Tracer, le exporterprofiles.Profiles, numRequests int, wantError error) { ld := testdata.GenerateProfiles(1) ctx, span := tracer.Start(context.Background(), fakeProfilesParentSpanName) From 4758173e51d53e78cc67c902fdea8291f91523ae Mon Sep 17 00:00:00 2001 From: dmathieu <42@dmathieu.com> Date: Wed, 11 Sep 2024 11:03:41 +0200 Subject: [PATCH 09/18] add profiles support to otlpexporter --- exporter/otlpexporter/README.md | 2 +- exporter/otlpexporter/cfg-schema.yaml | 4 +- exporter/otlpexporter/factory.go | 21 ++ exporter/otlpexporter/factory_test.go | 140 +++++++++++ exporter/otlpexporter/go.mod | 10 + .../internal/metadata/generated_status.go | 7 +- exporter/otlpexporter/metadata.yaml | 4 +- exporter/otlpexporter/otlp.go | 32 ++- exporter/otlpexporter/otlp_test.go | 238 ++++++++++++++++++ 9 files changed, 444 insertions(+), 14 deletions(-) diff --git a/exporter/otlpexporter/README.md b/exporter/otlpexporter/README.md index c1ea6716e1b..55f0464a2ab 100644 --- a/exporter/otlpexporter/README.md +++ b/exporter/otlpexporter/README.md @@ -3,7 +3,7 @@ | Status | | | ------------- |-----------| -| Stability | [beta]: logs | +| Stability | [beta]: logs, profiles | | | [stable]: traces, metrics | | Distributions | [core], [contrib], [k8s] | | Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector?query=is%3Aissue%20is%3Aopen%20label%3Aexporter%2Fotlp%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector/issues?q=is%3Aopen+is%3Aissue+label%3Aexporter%2Fotlp) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector?query=is%3Aissue%20is%3Aclosed%20label%3Aexporter%2Fotlp%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector/issues?q=is%3Aclosed+is%3Aissue+label%3Aexporter%2Fotlp) | diff --git a/exporter/otlpexporter/cfg-schema.yaml b/exporter/otlpexporter/cfg-schema.yaml index 073ec5707ee..8bc2b1de606 100644 --- a/exporter/otlpexporter/cfg-schema.yaml +++ b/exporter/otlpexporter/cfg-schema.yaml @@ -68,8 +68,8 @@ fields: - name: endpoint kind: string doc: | - The target to which the exporter is going to send traces or metrics, - using the gRPC protocol. The valid syntax is described at + The target to which the exporter is going to send traces, metrics, logs or + profiles, using the gRPC protocol. The valid syntax is described at https://github.com/grpc/grpc/blob/master/doc/naming.md. 
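+ # Illustrative only (not part of the schema): in a collector config this
+ # field is typically set under the exporter, e.g.
+ #   exporters:
+ #     otlp:
+ #       endpoint: otelcol:4317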
- name: compression kind: string diff --git a/exporter/otlpexporter/factory.go b/exporter/otlpexporter/factory.go index 84952defdf3..1c7eb71a0e5 100644 --- a/exporter/otlpexporter/factory.go +++ b/exporter/otlpexporter/factory.go @@ -15,6 +15,7 @@ import ( "go.opentelemetry.io/collector/exporter" "go.opentelemetry.io/collector/exporter/exporterbatcher" "go.opentelemetry.io/collector/exporter/exporterhelper" + "go.opentelemetry.io/collector/exporter/exporterprofiles" "go.opentelemetry.io/collector/exporter/otlpexporter/internal/metadata" ) @@ -26,6 +27,7 @@ func NewFactory() exporter.Factory { exporter.WithTraces(createTracesExporter, metadata.TracesStability), exporter.WithMetrics(createMetricsExporter, metadata.MetricsStability), exporter.WithLogs(createLogsExporter, metadata.LogsStability), + exporterprofiles.WithProfiles(createProfilesExporter, metadata.LogsStability), ) } @@ -104,3 +106,22 @@ func createLogsExporter( exporterhelper.WithShutdown(oce.shutdown), ) } + +func createProfilesExporter( + ctx context.Context, + set exporter.Settings, + cfg component.Config, +) (exporterprofiles.Profiles, error) { + oce := newExporter(cfg, set) + oCfg := cfg.(*Config) + return exporterhelper.NewProfilesExporter(ctx, set, cfg, + oce.pushProfiles, + exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}), + exporterhelper.WithTimeout(oCfg.TimeoutSettings), + exporterhelper.WithRetry(oCfg.RetryConfig), + exporterhelper.WithQueue(oCfg.QueueConfig), + exporterhelper.WithBatcher(oCfg.BatcherConfig), + exporterhelper.WithStart(oce.start), + exporterhelper.WithShutdown(oce.shutdown), + ) +} diff --git a/exporter/otlpexporter/factory_test.go b/exporter/otlpexporter/factory_test.go index 1882f78f94e..f46d4b91ef9 100644 --- a/exporter/otlpexporter/factory_test.go +++ b/exporter/otlpexporter/factory_test.go @@ -197,3 +197,143 @@ func TestCreateLogsExporter(t *testing.T) { require.NoError(t, err) require.NotNil(t, oexp) } + +func TestCreateProfilesExporter(t *testing.T) { + endpoint := testutil.GetAvailableLocalAddress(t) + tests := []struct { + name string + config *Config + mustFailOnStart bool + }{ + { + name: "UseSecure", + config: &Config{ + ClientConfig: configgrpc.ClientConfig{ + Endpoint: endpoint, + TLSSetting: configtls.ClientConfig{ + Insecure: false, + }, + }, + }, + }, + { + name: "Keepalive", + config: &Config{ + ClientConfig: configgrpc.ClientConfig{ + Endpoint: endpoint, + Keepalive: &configgrpc.KeepaliveClientConfig{ + Time: 30 * time.Second, + Timeout: 25 * time.Second, + PermitWithoutStream: true, + }, + }, + }, + }, + { + name: "NoneCompression", + config: &Config{ + ClientConfig: configgrpc.ClientConfig{ + Endpoint: endpoint, + Compression: "none", + }, + }, + }, + { + name: "GzipCompression", + config: &Config{ + ClientConfig: configgrpc.ClientConfig{ + Endpoint: endpoint, + Compression: configcompression.TypeGzip, + }, + }, + }, + { + name: "SnappyCompression", + config: &Config{ + ClientConfig: configgrpc.ClientConfig{ + Endpoint: endpoint, + Compression: configcompression.TypeSnappy, + }, + }, + }, + { + name: "ZstdCompression", + config: &Config{ + ClientConfig: configgrpc.ClientConfig{ + Endpoint: endpoint, + Compression: configcompression.TypeZstd, + }, + }, + }, + { + name: "Headers", + config: &Config{ + ClientConfig: configgrpc.ClientConfig{ + Endpoint: endpoint, + Headers: map[string]configopaque.String{ + "hdr1": "val1", + "hdr2": "val2", + }, + }, + }, + }, + { + name: "NumConsumers", + config: &Config{ + ClientConfig: configgrpc.ClientConfig{ + Endpoint: 
endpoint, + }, + }, + }, + { + name: "CaCert", + config: &Config{ + ClientConfig: configgrpc.ClientConfig{ + Endpoint: endpoint, + TLSSetting: configtls.ClientConfig{ + Config: configtls.Config{ + CAFile: filepath.Join("testdata", "test_cert.pem"), + }, + }, + }, + }, + }, + { + name: "CertPemFileError", + config: &Config{ + ClientConfig: configgrpc.ClientConfig{ + Endpoint: endpoint, + TLSSetting: configtls.ClientConfig{ + Config: configtls.Config{ + CAFile: "nosuchfile", + }, + }, + }, + }, + mustFailOnStart: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + factory := NewFactory() + set := exportertest.NewNopSettings() + consumer, err := factory.CreateProfilesExporter(context.Background(), set, tt.config) + assert.NoError(t, err) + assert.NotNil(t, consumer) + err = consumer.Start(context.Background(), componenttest.NewNopHost()) + if tt.mustFailOnStart { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + // Shutdown is called even when Start fails + err = consumer.Shutdown(context.Background()) + if err != nil { + // Since the endpoint of OTLP exporter doesn't actually exist, + // exporter may already stop because it cannot connect. + assert.Equal(t, err.Error(), "rpc error: code = Canceled desc = grpc: the client connection is closing") + } + }) + } +} diff --git a/exporter/otlpexporter/go.mod b/exporter/otlpexporter/go.mod index b849fe26e90..d76369a57c4 100644 --- a/exporter/otlpexporter/go.mod +++ b/exporter/otlpexporter/go.mod @@ -7,14 +7,20 @@ require ( go.opentelemetry.io/collector v0.109.0 go.opentelemetry.io/collector/component v0.109.0 go.opentelemetry.io/collector/config/configauth v0.109.0 + go.opentelemetry.io/collector/config/configcompression v1.14.1 go.opentelemetry.io/collector/config/configcompression v1.15.0 go.opentelemetry.io/collector/config/configgrpc v0.109.0 + go.opentelemetry.io/collector/config/configopaque v1.14.1 go.opentelemetry.io/collector/config/configopaque v1.15.0 + go.opentelemetry.io/collector/config/configretry v1.14.1 go.opentelemetry.io/collector/config/configretry v1.15.0 + go.opentelemetry.io/collector/config/configtls v1.14.1 go.opentelemetry.io/collector/config/configtls v1.15.0 + go.opentelemetry.io/collector/confmap v1.14.1 go.opentelemetry.io/collector/confmap v1.15.0 go.opentelemetry.io/collector/consumer v0.109.0 go.opentelemetry.io/collector/exporter v0.109.0 + go.opentelemetry.io/collector/pdata v1.14.1 go.opentelemetry.io/collector/pdata v1.15.0 go.opentelemetry.io/collector/pdata/testdata v0.109.0 go.uber.org/goleak v1.3.0 @@ -156,3 +162,7 @@ replace go.opentelemetry.io/collector/component/componentstatus => ../../compone replace go.opentelemetry.io/collector/receiver/receiverprofiles => ../../receiver/receiverprofiles replace go.opentelemetry.io/collector/exporter/exporterprofiles => ../exporterprofiles + +replace go.opentelemetry.io/collector/component/componentprofiles => ../../component/componentprofiles + +replace go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles => ../../consumer/consumererror/consumererrorprofiles diff --git a/exporter/otlpexporter/internal/metadata/generated_status.go b/exporter/otlpexporter/internal/metadata/generated_status.go index 6003002a1a3..36f02238361 100644 --- a/exporter/otlpexporter/internal/metadata/generated_status.go +++ b/exporter/otlpexporter/internal/metadata/generated_status.go @@ -12,7 +12,8 @@ var ( ) const ( - LogsStability = component.StabilityLevelBeta - TracesStability = component.StabilityLevelStable - 
MetricsStability = component.StabilityLevelStable + LogsStability = component.StabilityLevelBeta + ProfilesStability = component.StabilityLevelBeta + TracesStability = component.StabilityLevelStable + MetricsStability = component.StabilityLevelStable ) diff --git a/exporter/otlpexporter/metadata.yaml b/exporter/otlpexporter/metadata.yaml index 4ab3e8d74a1..3ce303e8c8e 100644 --- a/exporter/otlpexporter/metadata.yaml +++ b/exporter/otlpexporter/metadata.yaml @@ -5,9 +5,9 @@ status: class: exporter stability: stable: [traces, metrics] - beta: [logs] + beta: [logs, profiles] distributions: [core, contrib, k8s] tests: config: - endpoint: otelcol:4317 \ No newline at end of file + endpoint: otelcol:4317 diff --git a/exporter/otlpexporter/otlp.go b/exporter/otlpexporter/otlp.go index b8d6dee2a75..36bc6133c75 100644 --- a/exporter/otlpexporter/otlp.go +++ b/exporter/otlpexporter/otlp.go @@ -24,6 +24,8 @@ import ( "go.opentelemetry.io/collector/pdata/plog/plogotlp" "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp" + "go.opentelemetry.io/collector/pdata/pprofile" + "go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp" "go.opentelemetry.io/collector/pdata/ptrace" "go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp" ) @@ -33,12 +35,13 @@ type baseExporter struct { config *Config // gRPC clients and connection. - traceExporter ptraceotlp.GRPCClient - metricExporter pmetricotlp.GRPCClient - logExporter plogotlp.GRPCClient - clientConn *grpc.ClientConn - metadata metadata.MD - callOptions []grpc.CallOption + traceExporter ptraceotlp.GRPCClient + metricExporter pmetricotlp.GRPCClient + logExporter plogotlp.GRPCClient + profileExporter pprofileotlp.GRPCClient + clientConn *grpc.ClientConn + metadata metadata.MD + callOptions []grpc.CallOption settings component.TelemetrySettings @@ -64,6 +67,7 @@ func (e *baseExporter) start(ctx context.Context, host component.Host) (err erro e.traceExporter = ptraceotlp.NewGRPCClient(e.clientConn) e.metricExporter = pmetricotlp.NewGRPCClient(e.clientConn) e.logExporter = plogotlp.NewGRPCClient(e.clientConn) + e.profileExporter = pprofileotlp.NewGRPCClient(e.clientConn) headers := map[string]string{} for k, v := range e.config.ClientConfig.Headers { headers[k] = string(v) @@ -131,6 +135,22 @@ func (e *baseExporter) pushLogs(ctx context.Context, ld plog.Logs) error { return nil } +func (e *baseExporter) pushProfiles(ctx context.Context, td pprofile.Profiles) error { + req := pprofileotlp.NewExportRequestFromProfiles(td) + resp, respErr := e.profileExporter.Export(e.enhanceContext(ctx), req, e.callOptions...) 
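+ // A transport-level error aborts the push; a partial-success response from
+ // the server is only logged as a warning below and does not fail the export.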
+ if err := processError(respErr); err != nil { + return err + } + partialSuccess := resp.PartialSuccess() + if !(partialSuccess.ErrorMessage() == "" && partialSuccess.RejectedProfiles() == 0) { + e.settings.Logger.Warn("Partial success response", + zap.String("message", resp.PartialSuccess().ErrorMessage()), + zap.Int64("dropped_profiles", resp.PartialSuccess().RejectedProfiles()), + ) + } + return nil +} + func (e *baseExporter) enhanceContext(ctx context.Context) context.Context { if e.metadata.Len() > 0 { return metadata.NewOutgoingContext(ctx, e.metadata) diff --git a/exporter/otlpexporter/otlp_test.go b/exporter/otlpexporter/otlp_test.go index 7ab84a8cf5a..9f53fe9e8f5 100644 --- a/exporter/otlpexporter/otlp_test.go +++ b/exporter/otlpexporter/otlp_test.go @@ -35,6 +35,8 @@ import ( "go.opentelemetry.io/collector/pdata/plog/plogotlp" "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp" + "go.opentelemetry.io/collector/pdata/pprofile" + "go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp" "go.opentelemetry.io/collector/pdata/ptrace" "go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp" "go.opentelemetry.io/collector/pdata/testdata" @@ -223,6 +225,70 @@ func otlpMetricsReceiverOnGRPCServer(ln net.Listener) *mockMetricsReceiver { return rcv } +type mockProfilesReceiver struct { + pprofileotlp.UnimplementedGRPCServer + mockReceiver + exportResponse func() pprofileotlp.ExportResponse + lastRequest pprofile.Profiles +} + +func (r *mockProfilesReceiver) Export(ctx context.Context, req pprofileotlp.ExportRequest) (pprofileotlp.ExportResponse, error) { + r.requestCount.Add(int32(1)) + td := req.Profiles() + r.totalItems.Add(int32(td.SampleCount())) + r.mux.Lock() + defer r.mux.Unlock() + r.lastRequest = td + r.metadata, _ = metadata.FromIncomingContext(ctx) + return r.exportResponse(), r.exportError +} + +func (r *mockProfilesReceiver) getLastRequest() pprofile.Profiles { + r.mux.Lock() + defer r.mux.Unlock() + return r.lastRequest +} + +func (r *mockProfilesReceiver) setExportResponse(fn func() pprofileotlp.ExportResponse) { + r.mux.Lock() + defer r.mux.Unlock() + r.exportResponse = fn +} + +func otlpProfilesReceiverOnGRPCServer(ln net.Listener, useTLS bool) (*mockProfilesReceiver, error) { + sopts := []grpc.ServerOption{} + + if useTLS { + _, currentFile, _, _ := runtime.Caller(0) + basepath := filepath.Dir(currentFile) + certpath := filepath.Join(basepath, filepath.Join("testdata", "test_cert.pem")) + keypath := filepath.Join(basepath, filepath.Join("testdata", "test_key.pem")) + + creds, err := credentials.NewServerTLSFromFile(certpath, keypath) + if err != nil { + return nil, err + } + sopts = append(sopts, grpc.Creds(creds)) + } + + rcv := &mockProfilesReceiver{ + mockReceiver: mockReceiver{ + srv: grpc.NewServer(sopts...), + requestCount: &atomic.Int32{}, + totalItems: &atomic.Int32{}, + }, + exportResponse: pprofileotlp.NewExportResponse, + } + + // Now run it as a gRPC server + pprofileotlp.RegisterGRPCServer(rcv.srv, rcv) + go func() { + _ = rcv.srv.Serve(ln) + }() + + return rcv, nil +} + func TestSendTraces(t *testing.T) { // Start an OTLP-compatible receiver. ln, err := net.Listen("tcp", "localhost:") @@ -795,3 +861,175 @@ func TestSendLogData(t *testing.T) { assert.Len(t, observed.FilterLevelExact(zap.WarnLevel).All(), 1) assert.Contains(t, observed.FilterLevelExact(zap.WarnLevel).All()[0].Message, "Partial success") } + +func TestSendProfiles(t *testing.T) { + // Start an OTLP-compatible receiver. 
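+ // The mockProfilesReceiver started below records the request count, total
+ // sample count and incoming metadata so the assertions later in this test
+ // can inspect exactly what the exporter sent.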
+ ln, err := net.Listen("tcp", "localhost:") + require.NoError(t, err, "Failed to find an available address to run the gRPC server: %v", err) + rcv, _ := otlpProfilesReceiverOnGRPCServer(ln, false) + // Also closes the connection. + defer rcv.srv.GracefulStop() + + // Start an OTLP exporter and point to the receiver. + factory := NewFactory() + cfg := factory.CreateDefaultConfig().(*Config) + // Disable queuing to ensure that we execute the request when calling ConsumeProfiles + // otherwise we will not see any errors. + cfg.QueueConfig.Enabled = false + cfg.ClientConfig = configgrpc.ClientConfig{ + Endpoint: ln.Addr().String(), + TLSSetting: configtls.ClientConfig{ + Insecure: true, + }, + Headers: map[string]configopaque.String{ + "header": "header-value", + }, + } + set := exportertest.NewNopSettings() + set.BuildInfo.Description = "Collector" + set.BuildInfo.Version = "1.2.3test" + + // For testing the "Partial success" warning. + logger, observed := observer.New(zap.DebugLevel) + set.TelemetrySettings.Logger = zap.New(logger) + + exp, err := factory.CreateProfilesExporter(context.Background(), set, cfg) + require.NoError(t, err) + require.NotNil(t, exp) + + defer func() { + assert.NoError(t, exp.Shutdown(context.Background())) + }() + + host := componenttest.NewNopHost() + assert.NoError(t, exp.Start(context.Background(), host)) + + // Ensure that initially there is no data in the receiver. + assert.EqualValues(t, 0, rcv.requestCount.Load()) + + // Send empty profile. + td := pprofile.NewProfiles() + assert.NoError(t, exp.ConsumeProfiles(context.Background(), td)) + + // Wait until it is received. + assert.Eventually(t, func() bool { + return rcv.requestCount.Load() > 0 + }, 10*time.Second, 5*time.Millisecond) + + // Ensure it was received empty. + assert.EqualValues(t, 0, rcv.totalItems.Load()) + + // A request with 2 profiles. + td = testdata.GenerateProfiles(2) + + err = exp.ConsumeProfiles(context.Background(), td) + assert.NoError(t, err) + + // Wait until it is received. + assert.Eventually(t, func() bool { + return rcv.requestCount.Load() > 1 + }, 10*time.Second, 5*time.Millisecond) + + expectedHeader := []string{"header-value"} + + // Verify received span. + assert.EqualValues(t, 2, rcv.totalItems.Load()) + assert.EqualValues(t, 2, rcv.requestCount.Load()) + assert.EqualValues(t, td, rcv.getLastRequest()) + + md := rcv.getMetadata() + require.EqualValues(t, md.Get("header"), expectedHeader) + require.Equal(t, len(md.Get("User-Agent")), 1) + require.Contains(t, md.Get("User-Agent")[0], "Collector/1.2.3test") + + // Return partial success + rcv.setExportResponse(func() pprofileotlp.ExportResponse { + response := pprofileotlp.NewExportResponse() + partialSuccess := response.PartialSuccess() + partialSuccess.SetErrorMessage("Some spans were not ingested") + partialSuccess.SetRejectedProfiles(1) + + return response + }) + + // A request with 2 Profile entries. 
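+ // This batch is sent against the partial-success response configured above,
+ // so the exporter is expected to log a single warning instead of returning
+ // an error.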
+ td = testdata.GenerateProfiles(2) + + err = exp.ConsumeProfiles(context.Background(), td) + assert.NoError(t, err) + assert.Len(t, observed.FilterLevelExact(zap.WarnLevel).All(), 1) + assert.Contains(t, observed.FilterLevelExact(zap.WarnLevel).All()[0].Message, "Partial success") +} + +func TestSendProfilesWhenEndpointHasHttpScheme(t *testing.T) { + tests := []struct { + name string + useTLS bool + scheme string + gRPCClientSettings configgrpc.ClientConfig + }{ + { + name: "Use https scheme", + useTLS: true, + scheme: "https://", + gRPCClientSettings: configgrpc.ClientConfig{}, + }, + { + name: "Use http scheme", + useTLS: false, + scheme: "http://", + gRPCClientSettings: configgrpc.ClientConfig{ + TLSSetting: configtls.ClientConfig{ + Insecure: true, + }, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + // Start an OTLP-compatible receiver. + ln, err := net.Listen("tcp", "localhost:") + require.NoError(t, err, "Failed to find an available address to run the gRPC server: %v", err) + rcv, err := otlpProfilesReceiverOnGRPCServer(ln, test.useTLS) + require.NoError(t, err, "Failed to start mock OTLP receiver") + // Also closes the connection. + defer rcv.srv.GracefulStop() + + // Start an OTLP exporter and point to the receiver. + factory := NewFactory() + cfg := factory.CreateDefaultConfig().(*Config) + cfg.ClientConfig = test.gRPCClientSettings + cfg.ClientConfig.Endpoint = test.scheme + ln.Addr().String() + if test.useTLS { + cfg.ClientConfig.TLSSetting.InsecureSkipVerify = true + } + set := exportertest.NewNopSettings() + exp, err := factory.CreateProfilesExporter(context.Background(), set, cfg) + require.NoError(t, err) + require.NotNil(t, exp) + + defer func() { + assert.NoError(t, exp.Shutdown(context.Background())) + }() + + host := componenttest.NewNopHost() + assert.NoError(t, exp.Start(context.Background(), host)) + + // Ensure that initially there is no data in the receiver. + assert.EqualValues(t, 0, rcv.requestCount.Load()) + + // Send empty profile. + td := pprofile.NewProfiles() + assert.NoError(t, exp.ConsumeProfiles(context.Background(), td)) + + // Wait until it is received. + assert.Eventually(t, func() bool { + return rcv.requestCount.Load() > 0 + }, 10*time.Second, 5*time.Millisecond) + + // Ensure it was received empty. 
+ assert.EqualValues(t, 0, rcv.totalItems.Load()) + }) + } +} From 23eb958ddf5c5c6ab645579a3bee817f3a254c87 Mon Sep 17 00:00:00 2001 From: dmathieu <42@dmathieu.com> Date: Wed, 11 Sep 2024 11:24:03 +0200 Subject: [PATCH 10/18] add profiles support to otlphttpexporter --- cmd/builder/internal/builder/main_test.go | 1 + component/componenttest/obsreporttest.go | 10 -- .../componenttest/otelprometheuschecker.go | 4 - .../testdata/prometheus_response | 6 - .../exporterhelper/profiles_batch_test.go | 4 +- exporter/exporterhelper/profiles_test.go | 22 +-- exporter/otlpexporter/otlp_test.go | 2 +- exporter/otlphttpexporter/README.md | 2 +- exporter/otlphttpexporter/factory.go | 40 ++++- exporter/otlphttpexporter/factory_test.go | 21 ++- exporter/otlphttpexporter/go.mod | 4 + .../internal/metadata/generated_status.go | 7 +- exporter/otlphttpexporter/metadata.yaml | 4 +- exporter/otlphttpexporter/otlp.go | 68 +++++++- exporter/otlphttpexporter/otlp_test.go | 147 +++++++++++++++++- .../obsmetrics/obs_exporter.go | 5 + versions.yaml | 1 + 17 files changed, 288 insertions(+), 60 deletions(-) diff --git a/cmd/builder/internal/builder/main_test.go b/cmd/builder/internal/builder/main_test.go index 400740610cf..7ce5c343036 100644 --- a/cmd/builder/internal/builder/main_test.go +++ b/cmd/builder/internal/builder/main_test.go @@ -62,6 +62,7 @@ var ( "/confmap/provider/yamlprovider", "/consumer", "/consumer/consumerprofiles", + "/consumer/consumererror/consumererrorprofiles", "/consumer/consumertest", "/connector", "/connector/connectorprofiles", diff --git a/component/componenttest/obsreporttest.go b/component/componenttest/obsreporttest.go index 76d77adc9f9..9e780315017 100644 --- a/component/componenttest/obsreporttest.go +++ b/component/componenttest/obsreporttest.go @@ -56,12 +56,6 @@ func (tts *TestTelemetry) CheckExporterMetrics(sentMetricsPoints, sendFailedMetr return tts.prometheusChecker.checkExporterMetrics(tts.id, sentMetricsPoints, sendFailedMetricsPoints) } -// CheckExporterProfiles checks that for the current exported values for profiles exporter metrics match given values. -// Note: SetupTelemetry must be called before this function. -func (tts *TestTelemetry) CheckExporterProfiles(sentSamples, sendFailedSamples int64) error { - return tts.prometheusChecker.checkExporterProfiles(tts.id, sentSamples, sendFailedSamples) -} - func (tts *TestTelemetry) CheckExporterEnqueueFailedMetrics(enqueueFailed int64) error { return tts.prometheusChecker.checkExporterEnqueueFailed(tts.id, "metric_points", enqueueFailed) } @@ -74,10 +68,6 @@ func (tts *TestTelemetry) CheckExporterEnqueueFailedLogs(enqueueFailed int64) er return tts.prometheusChecker.checkExporterEnqueueFailed(tts.id, "log_records", enqueueFailed) } -func (tts *TestTelemetry) CheckExporterEnqueueFailedProfiles(enqueueFailed int64) error { - return tts.prometheusChecker.checkExporterEnqueueFailed(tts.id, "samples", enqueueFailed) -} - // CheckExporterLogs checks that for the current exported values for logs exporter metrics match given values. // Note: SetupTelemetry must be called before this function. 
func (tts *TestTelemetry) CheckExporterLogs(sentLogRecords, sendFailedLogRecords int64) error { diff --git a/component/componenttest/otelprometheuschecker.go b/component/componenttest/otelprometheuschecker.go index aea40dcad89..6a63617c206 100644 --- a/component/componenttest/otelprometheuschecker.go +++ b/component/componenttest/otelprometheuschecker.go @@ -82,10 +82,6 @@ func (pc *prometheusChecker) checkExporterMetrics(exporter component.ID, sent, s return pc.checkExporter(exporter, "metric_points", sent, sendFailed) } -func (pc *prometheusChecker) checkExporterProfiles(exporter component.ID, sent, sendFailed int64) error { - return pc.checkExporter(exporter, "samples", sent, sendFailed) -} - func (pc *prometheusChecker) checkExporter(exporter component.ID, datatype string, sent, sendFailed int64) error { exporterAttrs := attributesForExporterMetrics(exporter) errs := pc.checkCounter(fmt.Sprintf("exporter_sent_%s", datatype), sent, exporterAttrs) diff --git a/component/componenttest/testdata/prometheus_response b/component/componenttest/testdata/prometheus_response index 4243df1d5ef..9d0eb69ee7f 100644 --- a/component/componenttest/testdata/prometheus_response +++ b/component/componenttest/testdata/prometheus_response @@ -16,12 +16,6 @@ otelcol_exporter_send_failed_log_records{exporter="fakeExporter"} 36 # HELP otelcol_exporter_sent_log_records Number of logs successfully sent to destination. # TYPE otelcol_exporter_sent_log_records counter otelcol_exporter_sent_log_records{exporter="fakeExporter"} 103 -# HELP otelcol_exporter_send_failed_samples Number of samples in failed attempts to send to destination. -# TYPE otelcol_exporter_send_failed_samples counter -otelcol_exporter_send_failed_samples{exporter="fakeExporter"} 14 -# HELP otelcol_exporter_sent_samples Number of samples successfully sent to destination. -# TYPE otelcol_exporter_sent_samples counter -otelcol_exporter_sent_samples{exporter="fakeExporter"} 43 # HELP otelcol_processor_accepted_spans Number of spans successfully pushed into the next component in the pipeline. 
# TYPE otelcol_processor_accepted_spans counter otelcol_processor_accepted_spans{processor="fakeProcessor"} 42 diff --git a/exporter/exporterhelper/profiles_batch_test.go b/exporter/exporterhelper/profiles_batch_test.go index 96e0d35e54b..97e03120f75 100644 --- a/exporter/exporterhelper/profiles_batch_test.go +++ b/exporter/exporterhelper/profiles_batch_test.go @@ -18,7 +18,7 @@ func TestMergeProfiles(t *testing.T) { pr1 := &profilesRequest{pd: testdata.GenerateProfiles(2)} pr2 := &profilesRequest{pd: testdata.GenerateProfiles(3)} res, err := mergeProfiles(context.Background(), pr1, pr2) - assert.Nil(t, err) + assert.NoError(t, err) assert.Equal(t, 5, res.(*profilesRequest).pd.SampleCount()) } @@ -127,7 +127,7 @@ func TestMergeSplitProfiles(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { res, err := mergeSplitProfiles(context.Background(), tt.cfg, tt.pr1, tt.pr2) - assert.Nil(t, err) + assert.NoError(t, err) assert.Equal(t, len(tt.expected), len(res)) for i, r := range res { assert.Equal(t, tt.expected[i], r.(*profilesRequest)) diff --git a/exporter/exporterhelper/profiles_test.go b/exporter/exporterhelper/profiles_test.go index 19665a23c01..6968a818467 100644 --- a/exporter/exporterhelper/profiles_test.go +++ b/exporter/exporterhelper/profiles_test.go @@ -40,7 +40,6 @@ const ( ) var ( - fakeProfilesExporterName = component.MustNewIDWithName("fake_profiles_exporter", "with_name") fakeProfilesExporterConfig = struct{}{} ) @@ -190,7 +189,7 @@ func TestProfilesExporter_WithSpan(t *testing.T) { defer otel.SetTracerProvider(nooptrace.NewTracerProvider()) le, err := NewProfilesExporter(context.Background(), set, &fakeProfilesExporterConfig, newPushProfilesData(nil)) - require.Nil(t, err) + require.NoError(t, err) require.NotNil(t, le) checkWrapSpanForProfilesExporter(t, sr, set.TracerProvider.Tracer("test"), le, nil, 1) } @@ -203,7 +202,7 @@ func TestProfilesRequestExporter_WithSpan(t *testing.T) { defer otel.SetTracerProvider(nooptrace.NewTracerProvider()) le, err := NewProfilesRequestExporter(context.Background(), set, (&fakeRequestConverter{}).requestFromProfilesFunc) - require.Nil(t, err) + require.NoError(t, err) require.NotNil(t, le) checkWrapSpanForProfilesExporter(t, sr, set.TracerProvider.Tracer("test"), le, nil, 1) } @@ -217,7 +216,7 @@ func TestProfilesExporter_WithSpan_ReturnError(t *testing.T) { want := errors.New("my_error") le, err := NewProfilesExporter(context.Background(), set, &fakeProfilesExporterConfig, newPushProfilesData(want)) - require.Nil(t, err) + require.NoError(t, err) require.NotNil(t, le) checkWrapSpanForProfilesExporter(t, sr, set.TracerProvider.Tracer("test"), le, want, 1) } @@ -231,7 +230,7 @@ func TestProfilesRequestExporter_WithSpan_ReturnError(t *testing.T) { want := errors.New("my_error") le, err := NewProfilesRequestExporter(context.Background(), set, (&fakeRequestConverter{requestError: want}).requestFromProfilesFunc) - require.Nil(t, err) + require.NoError(t, err) require.NotNil(t, le) checkWrapSpanForProfilesExporter(t, sr, set.TracerProvider.Tracer("test"), le, want, 1) } @@ -244,7 +243,7 @@ func TestProfilesExporter_WithShutdown(t *testing.T) { assert.NotNil(t, le) assert.NoError(t, err) - assert.Nil(t, le.Shutdown(context.Background())) + assert.NoError(t, le.Shutdown(context.Background())) assert.True(t, shutdownCalled) } @@ -257,7 +256,7 @@ func TestProfilesRequestExporter_WithShutdown(t *testing.T) { assert.NotNil(t, le) assert.NoError(t, err) - assert.Nil(t, le.Shutdown(context.Background())) + assert.NoError(t, 
le.Shutdown(context.Background())) assert.True(t, shutdownCalled) } @@ -284,13 +283,6 @@ func TestProfilesRequestExporter_WithShutdown_ReturnError(t *testing.T) { assert.Equal(t, le.Shutdown(context.Background()), want) } -func newPushProfilesDataModifiedDownstream(retError error) consumerprofiles.ConsumeProfilesFunc { - return func(_ context.Context, profile pprofile.Profiles) error { - profile.ResourceProfiles().MoveAndAppendTo(pprofile.NewResourceProfilesSlice()) - return retError - } -} - func newPushProfilesData(retError error) consumerprofiles.ConsumeProfilesFunc { return func(_ context.Context, _ pprofile.Profiles) error { return retError @@ -313,7 +305,7 @@ func checkWrapSpanForProfilesExporter(t *testing.T, sr *tracetest.SpanRecorder, // Inspection time! gotSpanData := sr.Ended() - require.Equal(t, numRequests+1, len(gotSpanData)) + require.Len(t, gotSpanData, numRequests+1) parentSpan := gotSpanData[numRequests] require.Equalf(t, fakeProfilesParentSpanName, parentSpan.Name(), "SpanData %v", parentSpan) diff --git a/exporter/otlpexporter/otlp_test.go b/exporter/otlpexporter/otlp_test.go index 9f53fe9e8f5..4e823d125f5 100644 --- a/exporter/otlpexporter/otlp_test.go +++ b/exporter/otlpexporter/otlp_test.go @@ -939,7 +939,7 @@ func TestSendProfiles(t *testing.T) { md := rcv.getMetadata() require.EqualValues(t, md.Get("header"), expectedHeader) - require.Equal(t, len(md.Get("User-Agent")), 1) + require.Len(t, md.Get("User-Agent"), 1) require.Contains(t, md.Get("User-Agent")[0], "Collector/1.2.3test") // Return partial success diff --git a/exporter/otlphttpexporter/README.md b/exporter/otlphttpexporter/README.md index fbc58e4c9a2..ea75602eb16 100644 --- a/exporter/otlphttpexporter/README.md +++ b/exporter/otlphttpexporter/README.md @@ -3,7 +3,7 @@ | Status | | | ------------- |-----------| -| Stability | [beta]: logs | +| Stability | [beta]: logs, profiles | | | [stable]: traces, metrics | | Distributions | [core], [contrib], [k8s] | | Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector?query=is%3Aissue%20is%3Aopen%20label%3Aexporter%2Fotlphttp%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector/issues?q=is%3Aopen+is%3Aissue+label%3Aexporter%2Fotlphttp) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector?query=is%3Aissue%20is%3Aclosed%20label%3Aexporter%2Fotlphttp%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector/issues?q=is%3Aclosed+is%3Aissue+label%3Aexporter%2Fotlphttp) | diff --git a/exporter/otlphttpexporter/factory.go b/exporter/otlphttpexporter/factory.go index 96f7ed3c989..b13b4638bad 100644 --- a/exporter/otlphttpexporter/factory.go +++ b/exporter/otlphttpexporter/factory.go @@ -18,6 +18,7 @@ import ( "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/exporter" "go.opentelemetry.io/collector/exporter/exporterhelper" + "go.opentelemetry.io/collector/exporter/exporterprofiles" "go.opentelemetry.io/collector/exporter/otlphttpexporter/internal/metadata" ) @@ -29,6 +30,7 @@ func NewFactory() exporter.Factory { exporter.WithTraces(createTracesExporter, metadata.TracesStability), exporter.WithMetrics(createMetricsExporter, metadata.MetricsStability), exporter.WithLogs(createLogsExporter, metadata.LogsStability), + exporterprofiles.WithProfiles(createProfilesExporter, metadata.ProfilesStability), ) } @@ -49,7 +51,7 @@ func createDefaultConfig() 
component.Config { } } -func composeSignalURL(oCfg *Config, signalOverrideURL string, signalName string) (string, error) { +func composeSignalURL(oCfg *Config, signalOverrideURL, signalName, signalVersion string) (string, error) { switch { case signalOverrideURL != "": _, err := url.Parse(signalOverrideURL) @@ -61,9 +63,9 @@ func composeSignalURL(oCfg *Config, signalOverrideURL string, signalName string) return "", fmt.Errorf("either endpoint or %s_endpoint must be specified", signalName) default: if strings.HasSuffix(oCfg.Endpoint, "/") { - return oCfg.Endpoint + "v1/" + signalName, nil + return oCfg.Endpoint + signalVersion + "/" + signalName, nil } - return oCfg.Endpoint + "/v1/" + signalName, nil + return oCfg.Endpoint + "/" + signalVersion + "/" + signalName, nil } } @@ -78,7 +80,7 @@ func createTracesExporter( } oCfg := cfg.(*Config) - oce.tracesURL, err = composeSignalURL(oCfg, oCfg.TracesEndpoint, "traces") + oce.tracesURL, err = composeSignalURL(oCfg, oCfg.TracesEndpoint, "traces", "v1") if err != nil { return nil, err } @@ -104,7 +106,7 @@ func createMetricsExporter( } oCfg := cfg.(*Config) - oce.metricsURL, err = composeSignalURL(oCfg, oCfg.MetricsEndpoint, "metrics") + oce.metricsURL, err = composeSignalURL(oCfg, oCfg.MetricsEndpoint, "metrics", "v1") if err != nil { return nil, err } @@ -130,7 +132,7 @@ func createLogsExporter( } oCfg := cfg.(*Config) - oce.logsURL, err = composeSignalURL(oCfg, oCfg.LogsEndpoint, "logs") + oce.logsURL, err = composeSignalURL(oCfg, oCfg.LogsEndpoint, "logs", "v1") if err != nil { return nil, err } @@ -144,3 +146,29 @@ func createLogsExporter( exporterhelper.WithRetry(oCfg.RetryConfig), exporterhelper.WithQueue(oCfg.QueueConfig)) } + +func createProfilesExporter( + ctx context.Context, + set exporter.Settings, + cfg component.Config, +) (exporterprofiles.Profiles, error) { + oce, err := newExporter(cfg, set) + if err != nil { + return nil, err + } + oCfg := cfg.(*Config) + + oce.profilesURL, err = composeSignalURL(oCfg, "", "profiles", "v1development") + if err != nil { + return nil, err + } + + return exporterhelper.NewProfilesExporter(ctx, set, cfg, + oce.pushProfiles, + exporterhelper.WithStart(oce.start), + exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}), + // explicitly disable since we rely on http.Client timeout logic. 
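+		// The request deadline is instead governed by the shared ClientConfig timeout on the underlying http.Client, the same pattern used for the traces, metrics and logs exporters in this factory.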
+ exporterhelper.WithTimeout(exporterhelper.TimeoutSettings{Timeout: 0}), + exporterhelper.WithRetry(oCfg.RetryConfig), + exporterhelper.WithQueue(oCfg.QueueConfig)) +} diff --git a/exporter/otlphttpexporter/factory_test.go b/exporter/otlphttpexporter/factory_test.go index 6648a1d5001..a35f62557f0 100644 --- a/exporter/otlphttpexporter/factory_test.go +++ b/exporter/otlphttpexporter/factory_test.go @@ -209,19 +209,36 @@ func TestCreateLogsExporter(t *testing.T) { require.NotNil(t, oexp) } +func TestCreateProfilesExporter(t *testing.T) { + factory := NewFactory() + cfg := factory.CreateDefaultConfig().(*Config) + cfg.ClientConfig.Endpoint = "http://" + testutil.GetAvailableLocalAddress(t) + + set := exportertest.NewNopSettings() + oexp, err := factory.CreateProfilesExporter(context.Background(), set, cfg) + require.NoError(t, err) + require.NotNil(t, oexp) +} + func TestComposeSignalURL(t *testing.T) { factory := NewFactory() cfg := factory.CreateDefaultConfig().(*Config) // Has slash at end cfg.ClientConfig.Endpoint = "http://localhost:4318/" - url, err := composeSignalURL(cfg, "", "traces") + url, err := composeSignalURL(cfg, "", "traces", "v1") require.NoError(t, err) assert.Equal(t, "http://localhost:4318/v1/traces", url) // No slash at end cfg.ClientConfig.Endpoint = "http://localhost:4318" - url, err = composeSignalURL(cfg, "", "traces") + url, err = composeSignalURL(cfg, "", "traces", "v1") require.NoError(t, err) assert.Equal(t, "http://localhost:4318/v1/traces", url) + + // Different version + cfg.ClientConfig.Endpoint = "http://localhost:4318" + url, err = composeSignalURL(cfg, "", "traces", "v2") + require.NoError(t, err) + assert.Equal(t, "http://localhost:4318/v2/traces", url) } diff --git a/exporter/otlphttpexporter/go.mod b/exporter/otlphttpexporter/go.mod index 4c57f7b8604..5a155d8eaef 100644 --- a/exporter/otlphttpexporter/go.mod +++ b/exporter/otlphttpexporter/go.mod @@ -153,3 +153,7 @@ replace go.opentelemetry.io/collector/component/componentstatus => ../../compone replace go.opentelemetry.io/collector/receiver/receiverprofiles => ../../receiver/receiverprofiles replace go.opentelemetry.io/collector/exporter/exporterprofiles => ../exporterprofiles + +replace go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles => ../../consumer/consumererror/consumererrorprofiles + +replace go.opentelemetry.io/collector/component/componentprofiles => ../../component/componentprofiles diff --git a/exporter/otlphttpexporter/internal/metadata/generated_status.go b/exporter/otlphttpexporter/internal/metadata/generated_status.go index c38e6de2550..fe3678e85e3 100644 --- a/exporter/otlphttpexporter/internal/metadata/generated_status.go +++ b/exporter/otlphttpexporter/internal/metadata/generated_status.go @@ -12,7 +12,8 @@ var ( ) const ( - LogsStability = component.StabilityLevelBeta - TracesStability = component.StabilityLevelStable - MetricsStability = component.StabilityLevelStable + LogsStability = component.StabilityLevelBeta + ProfilesStability = component.StabilityLevelBeta + TracesStability = component.StabilityLevelStable + MetricsStability = component.StabilityLevelStable ) diff --git a/exporter/otlphttpexporter/metadata.yaml b/exporter/otlphttpexporter/metadata.yaml index 6d4b1bcba34..cb86047581c 100644 --- a/exporter/otlphttpexporter/metadata.yaml +++ b/exporter/otlphttpexporter/metadata.yaml @@ -5,10 +5,10 @@ status: class: exporter stability: stable: [traces, metrics] - beta: [logs] + beta: [logs, profiles] distributions: [core, contrib, k8s] tests: config: 
endpoint: "https://1.2.3.4:1234" - + diff --git a/exporter/otlphttpexporter/otlp.go b/exporter/otlphttpexporter/otlp.go index 5c88b53c83f..0decd18315a 100644 --- a/exporter/otlphttpexporter/otlp.go +++ b/exporter/otlphttpexporter/otlp.go @@ -28,19 +28,22 @@ import ( "go.opentelemetry.io/collector/pdata/plog/plogotlp" "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp" + "go.opentelemetry.io/collector/pdata/pprofile" + "go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp" "go.opentelemetry.io/collector/pdata/ptrace" "go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp" ) type baseExporter struct { // Input configuration. - config *Config - client *http.Client - tracesURL string - metricsURL string - logsURL string - logger *zap.Logger - settings component.TelemetrySettings + config *Config + client *http.Client + tracesURL string + metricsURL string + logsURL string + profilesURL string + logger *zap.Logger + settings component.TelemetrySettings // Default user-agent header. userAgent string } @@ -149,6 +152,27 @@ func (e *baseExporter) pushLogs(ctx context.Context, ld plog.Logs) error { return e.export(ctx, e.logsURL, request, e.logsPartialSuccessHandler) } +func (e *baseExporter) pushProfiles(ctx context.Context, td pprofile.Profiles) error { + tr := pprofileotlp.NewExportRequestFromProfiles(td) + + var err error + var request []byte + switch e.config.Encoding { + case EncodingJSON: + request, err = tr.MarshalJSON() + case EncodingProto: + request, err = tr.MarshalProto() + default: + err = fmt.Errorf("invalid encoding: %s", e.config.Encoding) + } + + if err != nil { + return consumererror.NewPermanent(err) + } + + return e.export(ctx, e.profilesURL, request, e.profilesPartialSuccessHandler) +} + func (e *baseExporter) export(ctx context.Context, url string, request []byte, partialSuccessHandler partialSuccessHandler) error { e.logger.Debug("Preparing to make HTTP request", zap.String("url", url)) req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(request)) @@ -392,3 +416,33 @@ func (e *baseExporter) logsPartialSuccessHandler(protoBytes []byte, contentType } return nil } + +func (e *baseExporter) profilesPartialSuccessHandler(protoBytes []byte, contentType string) error { + if protoBytes == nil { + return nil + } + exportResponse := pprofileotlp.NewExportResponse() + switch contentType { + case protobufContentType: + err := exportResponse.UnmarshalProto(protoBytes) + if err != nil { + return fmt.Errorf("error parsing protobuf response: %w", err) + } + case jsonContentType: + err := exportResponse.UnmarshalJSON(protoBytes) + if err != nil { + return fmt.Errorf("error parsing json response: %w", err) + } + default: + return nil + } + + partialSuccess := exportResponse.PartialSuccess() + if !(partialSuccess.ErrorMessage() == "" && partialSuccess.RejectedProfiles() == 0) { + e.logger.Warn("Partial success response", + zap.String("message", exportResponse.PartialSuccess().ErrorMessage()), + zap.Int64("dropped_samples", exportResponse.PartialSuccess().RejectedProfiles()), + ) + } + return nil +} diff --git a/exporter/otlphttpexporter/otlp_test.go b/exporter/otlphttpexporter/otlp_test.go index 7ef750f9e90..65e8ea02752 100644 --- a/exporter/otlphttpexporter/otlp_test.go +++ b/exporter/otlphttpexporter/otlp_test.go @@ -33,6 +33,8 @@ import ( "go.opentelemetry.io/collector/pdata/plog/plogotlp" "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp" + 
"go.opentelemetry.io/collector/pdata/pprofile" + "go.opentelemetry.io/collector/pdata/pprofile/pprofileotlp" "go.opentelemetry.io/collector/pdata/ptrace" "go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp" ) @@ -40,6 +42,7 @@ import ( const tracesTelemetryType = "traces" const metricsTelemetryType = "metrics" const logsTelemetryType = "logs" +const profilesTelemetryType = "profiles" type responseSerializer interface { MarshalJSON() ([]byte, error) @@ -72,6 +75,14 @@ func provideLogsResponseSerializer() responseSerializer { return response } +func provideProfilesResponseSerializer() responseSerializer { + response := pprofileotlp.NewExportResponse() + partial := response.PartialSuccess() + partial.SetErrorMessage("hello") + partial.SetRejectedProfiles(1) + return response +} + func TestErrorResponses(t *testing.T) { errMsgPrefix := func(srv *httptest.Server) string { return fmt.Sprintf("error exporting items, request to %s/v1/traces responded with HTTP Status Code ", srv.URL) @@ -381,6 +392,40 @@ func TestUserAgent(t *testing.T) { }) } }) + + t.Run("profiles", func(t *testing.T) { + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + srv := createBackend("/v1development/profiles", func(writer http.ResponseWriter, request *http.Request) { + assert.Contains(t, request.Header.Get("user-agent"), test.expectedUA) + writer.WriteHeader(200) + }) + defer srv.Close() + + cfg := &Config{ + Encoding: EncodingProto, + ClientConfig: confighttp.ClientConfig{ + Endpoint: srv.URL, + Headers: test.headers, + }, + } + exp, err := createProfilesExporter(context.Background(), set, cfg) + require.NoError(t, err) + + // start the exporter + err = exp.Start(context.Background(), componenttest.NewNopHost()) + require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, exp.Shutdown(context.Background())) + }) + + // generate data + profiles := pprofile.NewProfiles() + err = exp.ConsumeProfiles(context.Background(), profiles) + require.NoError(t, err) + }) + } + }) } func TestPartialSuccessInvalidBody(t *testing.T) { @@ -404,6 +449,10 @@ func TestPartialSuccessInvalidBody(t *testing.T) { telemetryType: "logs", handler: exp.logsPartialSuccessHandler, }, + { + telemetryType: "profiles", + handler: exp.profilesPartialSuccessHandler, + }, } for _, tt := range invalidBodyCases { t.Run("Invalid response body_"+tt.telemetryType, func(t *testing.T) { @@ -428,7 +477,7 @@ func TestPartialSuccessUnsupportedContentType(t *testing.T) { contentType: "application/octet-stream", }, } - for _, telemetryType := range []string{"logs", "metrics", "traces"} { + for _, telemetryType := range []string{"logs", "metrics", "traces", "profiles"} { for _, tt := range unsupportedContentTypeCases { t.Run("Unsupported content type "+tt.contentType+" "+telemetryType, func(t *testing.T) { var handler func(b []byte, contentType string) error @@ -439,6 +488,8 @@ func TestPartialSuccessUnsupportedContentType(t *testing.T) { handler = exp.metricsPartialSuccessHandler case "traces": handler = exp.tracesPartialSuccessHandler + case "profiles": + handler = exp.profilesPartialSuccessHandler default: panic(telemetryType) } @@ -529,6 +580,11 @@ func TestPartialResponse_missingHeaderButHasBody(t *testing.T) { handler: exp.logsPartialSuccessHandler, serializer: provideLogsResponseSerializer, }, + { + telemetryType: profilesTelemetryType, + handler: exp.profilesPartialSuccessHandler, + serializer: provideProfilesResponseSerializer, + }, } for _, ct := range contentTypes { @@ -593,6 +649,10 @@ func 
TestPartialResponse_missingHeaderAndBody(t *testing.T) { telemetryType: logsTelemetryType, handler: exp.logsPartialSuccessHandler, }, + { + telemetryType: profilesTelemetryType, + handler: exp.profilesPartialSuccessHandler, + }, } for _, ct := range contentTypes { @@ -661,6 +721,11 @@ func TestPartialSuccess_shortContentLengthHeader(t *testing.T) { handler: exp.logsPartialSuccessHandler, serializer: provideLogsResponseSerializer, }, + { + telemetryType: profilesTelemetryType, + handler: exp.profilesPartialSuccessHandler, + serializer: provideProfilesResponseSerializer, + }, } for _, ct := range contentTypes { @@ -720,6 +785,10 @@ func TestPartialSuccess_longContentLengthHeader(t *testing.T) { telemetryType: logsTelemetryType, serializer: provideLogsResponseSerializer, }, + { + telemetryType: profilesTelemetryType, + serializer: provideProfilesResponseSerializer, + }, } for _, ct := range contentTypes { @@ -743,6 +812,8 @@ func TestPartialSuccess_longContentLengthHeader(t *testing.T) { handler = exp.metricsPartialSuccessHandler case logsTelemetryType: handler = exp.logsPartialSuccessHandler + case profilesTelemetryType: + handler = exp.profilesPartialSuccessHandler default: require.Fail(t, "unsupported telemetry type: %s", ct.contentType) } @@ -874,6 +945,47 @@ func TestPartialSuccess_metrics(t *testing.T) { require.Contains(t, observed.FilterLevelExact(zap.WarnLevel).All()[0].Message, "Partial success") } +func TestPartialSuccess_profiles(t *testing.T) { + srv := createBackend("/v1development/profiles", func(writer http.ResponseWriter, _ *http.Request) { + response := pprofileotlp.NewExportResponse() + partial := response.PartialSuccess() + partial.SetErrorMessage("hello") + partial.SetRejectedProfiles(1) + bytes, err := response.MarshalProto() + require.NoError(t, err) + writer.Header().Set("Content-Type", "application/x-protobuf") + _, err = writer.Write(bytes) + require.NoError(t, err) + }) + defer srv.Close() + + cfg := &Config{ + Encoding: EncodingProto, + ClientConfig: confighttp.ClientConfig{ + Endpoint: srv.URL, + }, + } + set := exportertest.NewNopSettings() + logger, observed := observer.New(zap.DebugLevel) + set.TelemetrySettings.Logger = zap.New(logger) + exp, err := createProfilesExporter(context.Background(), set, cfg) + require.NoError(t, err) + + // start the exporter + err = exp.Start(context.Background(), componenttest.NewNopHost()) + require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, exp.Shutdown(context.Background())) + }) + + // generate data + profiles := pprofile.NewProfiles() + err = exp.ConsumeProfiles(context.Background(), profiles) + require.NoError(t, err) + require.Len(t, observed.FilterLevelExact(zap.WarnLevel).All(), 1) + require.Contains(t, observed.FilterLevelExact(zap.WarnLevel).All()[0].Message, "Partial success") +} + func TestEncoding(t *testing.T) { set := exportertest.NewNopSettings() set.BuildInfo.Description = "Collector" @@ -990,6 +1102,39 @@ func TestEncoding(t *testing.T) { }) } }) + + t.Run("profiles", func(t *testing.T) { + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + srv := createBackend("/v1development/profiles", func(writer http.ResponseWriter, request *http.Request) { + assert.Contains(t, request.Header.Get("content-type"), test.expectedEncoding) + writer.WriteHeader(200) + }) + defer srv.Close() + + cfg := &Config{ + ClientConfig: confighttp.ClientConfig{ + Endpoint: srv.URL, + }, + Encoding: test.encoding, + } + exp, err := createProfilesExporter(context.Background(), set, cfg) + require.NoError(t, 
err) + + // start the exporter + err = exp.Start(context.Background(), componenttest.NewNopHost()) + require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, exp.Shutdown(context.Background())) + }) + + // generate data + profiles := pprofile.NewProfiles() + err = exp.ConsumeProfiles(context.Background(), profiles) + require.NoError(t, err) + }) + } + }) } func createBackend(endpoint string, handler func(writer http.ResponseWriter, request *http.Request)) *httptest.Server { diff --git a/internal/obsreportconfig/obsmetrics/obs_exporter.go b/internal/obsreportconfig/obsmetrics/obs_exporter.go index 65f4e6aad3a..e69b30d2bc4 100644 --- a/internal/obsreportconfig/obsmetrics/obs_exporter.go +++ b/internal/obsreportconfig/obsmetrics/obs_exporter.go @@ -24,6 +24,11 @@ const ( SentLogRecordsKey = "sent_log_records" // FailedToSendLogRecordsKey used to track logs that failed to be sent by exporters. FailedToSendLogRecordsKey = "send_failed_log_records" + + // SentSamplesKey used to track profiles samples sent by exporters. + SentSamplesKey = "sent_samples" + // FailedToSendSamplesKey used to track samples that failed to be sent by exporters. + FailedToSendSamplesKey = "send_failed_samples" ) var ( diff --git a/versions.yaml b/versions.yaml index 2d1969ae6dd..124aec065ce 100644 --- a/versions.yaml +++ b/versions.yaml @@ -40,6 +40,7 @@ module-sets: - go.opentelemetry.io/collector/connector/forwardconnector - go.opentelemetry.io/collector/consumer - go.opentelemetry.io/collector/consumer/consumerprofiles + - go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles - go.opentelemetry.io/collector/consumer/consumertest - go.opentelemetry.io/collector/exporter - go.opentelemetry.io/collector/exporter/debugexporter From 885704347e95e46fd5fda20b10bfbe7442130133 Mon Sep 17 00:00:00 2001 From: dmathieu <42@dmathieu.com> Date: Wed, 11 Sep 2024 11:39:19 +0200 Subject: [PATCH 11/18] run gotidy --- Makefile | 2 ++ cmd/builder/test/core.builder.yaml | 1 + cmd/otelcorecol/builder-config.yaml | 1 + cmd/otelcorecol/go.mod | 3 +++ .../consumererror/consumererrorprofiles/go.mod | 12 +++++++++--- .../consumererror/consumererrorprofiles/go.sum | 6 ------ consumer/go.mod | 2 +- exporter/debugexporter/go.mod | 6 ++++++ exporter/exporterprofiles/go.mod | 4 ++++ exporter/go.mod | 6 ++++++ exporter/loggingexporter/go.mod | 6 ++++++ exporter/nopexporter/go.mod | 4 ++++ exporter/otlpexporter/go.mod | 16 ++++++---------- exporter/otlphttpexporter/go.mod | 10 ++++++---- internal/e2e/go.mod | 3 +++ otelcol/go.mod | 2 ++ otelcol/otelcoltest/go.mod | 2 ++ service/go.mod | 2 ++ 18 files changed, 64 insertions(+), 24 deletions(-) diff --git a/Makefile b/Makefile index d27740260b1..9a18c42cb0d 100644 --- a/Makefile +++ b/Makefile @@ -287,6 +287,7 @@ check-contrib: -replace go.opentelemetry.io/collector/connector/forwardconnector=$(CURDIR)/connector/forwardconnector \ -replace go.opentelemetry.io/collector/consumer=$(CURDIR)/consumer \ -replace go.opentelemetry.io/collector/consumer/consumerprofiles=$(CURDIR)/consumer/consumerprofiles \ + -replace go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles=$(CURDIR)/consumer/consumererror/consumererrorprofiles \ -replace go.opentelemetry.io/collector/consumer/consumertest=$(CURDIR)/consumer/consumertest \ -replace go.opentelemetry.io/collector/exporter=$(CURDIR)/exporter \ -replace go.opentelemetry.io/collector/exporter/debugexporter=$(CURDIR)/exporter/debugexporter \ @@ -356,6 +357,7 @@ restore-contrib: -dropreplace 
go.opentelemetry.io/collector/connector/forwardconnector \ -dropreplace go.opentelemetry.io/collector/consumer \ -dropreplace go.opentelemetry.io/collector/consumer/consumerprofiles \ + -dropreplace go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles \ -dropreplace go.opentelemetry.io/collector/consumer/consumertest \ -dropreplace go.opentelemetry.io/collector/exporter \ -dropreplace go.opentelemetry.io/collector/exporter/debugexporter \ diff --git a/cmd/builder/test/core.builder.yaml b/cmd/builder/test/core.builder.yaml index 866a625a91b..ba1292f9eb0 100644 --- a/cmd/builder/test/core.builder.yaml +++ b/cmd/builder/test/core.builder.yaml @@ -35,6 +35,7 @@ replaces: - go.opentelemetry.io/collector/confmap/provider/yamlprovider => ${WORKSPACE_DIR}/confmap/provider/yamlprovider - go.opentelemetry.io/collector/consumer => ${WORKSPACE_DIR}/consumer - go.opentelemetry.io/collector/consumer/consumerprofiles => ${WORKSPACE_DIR}/consumer/consumerprofiles + - go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles => ${WORKSPACE_DIR}/consumer/consumererror/consumererrorprofiles - go.opentelemetry.io/collector/consumer/consumertest => ${WORKSPACE_DIR}/consumer/consumertest - go.opentelemetry.io/collector/connector => ${WORKSPACE_DIR}/connector - go.opentelemetry.io/collector/connector/connectorprofiles => ${WORKSPACE_DIR}/connector/connectorprofiles diff --git a/cmd/otelcorecol/builder-config.yaml b/cmd/otelcorecol/builder-config.yaml index b5a726e374b..008be1ff917 100644 --- a/cmd/otelcorecol/builder-config.yaml +++ b/cmd/otelcorecol/builder-config.yaml @@ -64,6 +64,7 @@ replaces: - go.opentelemetry.io/collector/confmap/provider/yamlprovider => ../../confmap/provider/yamlprovider - go.opentelemetry.io/collector/consumer => ../../consumer - go.opentelemetry.io/collector/consumer/consumerprofiles => ../../consumer/consumerprofiles + - go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles => ../../consumer/consumererror/consumererrorprofiles - go.opentelemetry.io/collector/consumer/consumertest => ../../consumer/consumertest - go.opentelemetry.io/collector/connector => ../../connector - go.opentelemetry.io/collector/connector/connectorprofiles => ../../connector/connectorprofiles diff --git a/cmd/otelcorecol/go.mod b/cmd/otelcorecol/go.mod index 5de90ac84fd..f44d57b8018 100644 --- a/cmd/otelcorecol/go.mod +++ b/cmd/otelcorecol/go.mod @@ -95,6 +95,7 @@ require ( go.opentelemetry.io/collector/config/internal v0.109.0 // indirect go.opentelemetry.io/collector/connector/connectorprofiles v0.109.0 // indirect go.opentelemetry.io/collector/consumer v0.109.0 // indirect + go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles v0.0.0-00010101000000-000000000000 // indirect go.opentelemetry.io/collector/consumer/consumerprofiles v0.109.0 // indirect go.opentelemetry.io/collector/consumer/consumertest v0.109.0 // indirect go.opentelemetry.io/collector/exporter/exporterprofiles v0.109.0 // indirect @@ -196,6 +197,8 @@ replace go.opentelemetry.io/collector/consumer => ../../consumer replace go.opentelemetry.io/collector/consumer/consumerprofiles => ../../consumer/consumerprofiles +replace go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles => ../../consumer/consumererror/consumererrorprofiles + replace go.opentelemetry.io/collector/consumer/consumertest => ../../consumer/consumertest replace go.opentelemetry.io/collector/connector => ../../connector diff --git 
a/consumer/consumererror/consumererrorprofiles/go.mod b/consumer/consumererror/consumererrorprofiles/go.mod index d7ed72753dd..dff43fadf63 100644 --- a/consumer/consumererror/consumererrorprofiles/go.mod +++ b/consumer/consumererror/consumererrorprofiles/go.mod @@ -5,8 +5,8 @@ go 1.22.0 require ( github.com/stretchr/testify v1.9.0 go.opentelemetry.io/collector/consumer v0.0.0-00010101000000-000000000000 - go.opentelemetry.io/collector/pdata/pprofile v0.108.1 - go.opentelemetry.io/collector/pdata/testdata v0.108.1 + go.opentelemetry.io/collector/pdata/pprofile v0.109.0 + go.opentelemetry.io/collector/pdata/testdata v0.109.0 ) require ( @@ -16,7 +16,7 @@ require ( github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - go.opentelemetry.io/collector/pdata v1.14.1 // indirect + go.opentelemetry.io/collector/pdata v1.15.0 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/net v0.26.0 // indirect golang.org/x/sys v0.21.0 // indirect @@ -28,3 +28,9 @@ require ( ) replace go.opentelemetry.io/collector/consumer => ../.. + +replace go.opentelemetry.io/collector/pdata => ../../../pdata + +replace go.opentelemetry.io/collector/pdata/testdata => ../../../pdata/testdata + +replace go.opentelemetry.io/collector/pdata/pprofile => ../../../pdata/pprofile diff --git a/consumer/consumererror/consumererrorprofiles/go.sum b/consumer/consumererror/consumererrorprofiles/go.sum index 5117c125c5e..03ca0e47eea 100644 --- a/consumer/consumererror/consumererrorprofiles/go.sum +++ b/consumer/consumererror/consumererrorprofiles/go.sum @@ -29,12 +29,6 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.opentelemetry.io/collector/pdata v1.14.1 h1:wXZjtQA7Vy5HFqco+yA95ENyMQU5heBB1IxMHQf6mUk= -go.opentelemetry.io/collector/pdata v1.14.1/go.mod h1:z1dTjwwtcoXxZx2/nkHysjxMeaxe9pEmYTEr4SMNIx8= -go.opentelemetry.io/collector/pdata/pprofile v0.108.1 h1:/XbunfZ+/jt1+d1p4zM4vZ/AgeaIJsayjYdlN1fV+tk= -go.opentelemetry.io/collector/pdata/pprofile v0.108.1/go.mod h1:/GvG2WcN9Dajlw4QaIOjgz7N32wSfPL3qxJ0BKOcVPo= -go.opentelemetry.io/collector/pdata/testdata v0.108.1 h1:TpBDoBMBYvC/Ibswe3Ec2eof8XrRrEec6+tfnTeTSGk= -go.opentelemetry.io/collector/pdata/testdata v0.108.1/go.mod h1:PdUmBA4yDRD4Wf0fpCyrpdZexz9EDoHBw5Ot4iIUPRs= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= diff --git a/consumer/go.mod b/consumer/go.mod index d3db6f7b441..ccdb41d6cd2 100644 --- a/consumer/go.mod +++ b/consumer/go.mod @@ -5,6 +5,7 @@ go 1.22.0 require ( github.com/stretchr/testify v1.9.0 go.opentelemetry.io/collector/pdata v1.15.0 + go.opentelemetry.io/collector/pdata/pprofile v0.109.0 go.opentelemetry.io/collector/pdata/testdata v0.109.0 go.uber.org/goleak v1.3.0 ) @@ -16,7 +17,6 @@ require ( github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - go.opentelemetry.io/collector/pdata/pprofile v0.109.0 // indirect 
go.uber.org/multierr v1.11.0 // indirect golang.org/x/net v0.26.0 // indirect golang.org/x/sys v0.21.0 // indirect diff --git a/exporter/debugexporter/go.mod b/exporter/debugexporter/go.mod index f1baf874d33..6759007222d 100644 --- a/exporter/debugexporter/go.mod +++ b/exporter/debugexporter/go.mod @@ -41,7 +41,9 @@ require ( github.com/prometheus/common v0.59.1 // indirect github.com/prometheus/procfs v0.15.1 // indirect go.opentelemetry.io/collector v0.109.0 // indirect + go.opentelemetry.io/collector/component/componentprofiles v0.109.0 // indirect go.opentelemetry.io/collector/config/configretry v1.15.0 // indirect + go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles v0.0.0-00010101000000-000000000000 // indirect go.opentelemetry.io/collector/consumer/consumerprofiles v0.109.0 // indirect go.opentelemetry.io/collector/consumer/consumertest v0.109.0 // indirect go.opentelemetry.io/collector/exporter/exporterprofiles v0.109.0 // indirect @@ -96,6 +98,8 @@ replace go.opentelemetry.io/collector/config/configretry => ../../config/configr replace go.opentelemetry.io/collector/consumer/consumerprofiles => ../../consumer/consumerprofiles +replace go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles => ../../consumer/consumererror/consumererrorprofiles + replace go.opentelemetry.io/collector/consumer/consumertest => ../../consumer/consumertest replace go.opentelemetry.io/collector/component/componentstatus => ../../component/componentstatus @@ -103,3 +107,5 @@ replace go.opentelemetry.io/collector/component/componentstatus => ../../compone replace go.opentelemetry.io/collector/receiver/receiverprofiles => ../../receiver/receiverprofiles replace go.opentelemetry.io/collector/exporter/exporterprofiles => ../exporterprofiles + +replace go.opentelemetry.io/collector/component/componentprofiles => ../../component/componentprofiles diff --git a/exporter/exporterprofiles/go.mod b/exporter/exporterprofiles/go.mod index 86817aac2c8..2ff006dfd7f 100644 --- a/exporter/exporterprofiles/go.mod +++ b/exporter/exporterprofiles/go.mod @@ -70,3 +70,7 @@ replace go.opentelemetry.io/collector/exporter => ../ replace go.opentelemetry.io/collector/component/componentstatus => ../../component/componentstatus replace go.opentelemetry.io/collector/receiver/receiverprofiles => ../../receiver/receiverprofiles + +replace go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles => ../../consumer/consumererror/consumererrorprofiles + +replace go.opentelemetry.io/collector/component/componentprofiles => ../../component/componentprofiles diff --git a/exporter/go.mod b/exporter/go.mod index 5cab4475031..9c6c92d63df 100644 --- a/exporter/go.mod +++ b/exporter/go.mod @@ -8,9 +8,11 @@ require ( github.com/stretchr/testify v1.9.0 go.opentelemetry.io/collector v0.109.0 go.opentelemetry.io/collector/component v0.109.0 + go.opentelemetry.io/collector/component/componentprofiles v0.109.0 go.opentelemetry.io/collector/config/configretry v1.15.0 go.opentelemetry.io/collector/config/configtelemetry v0.109.0 go.opentelemetry.io/collector/consumer v0.109.0 + go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles v0.0.0-00010101000000-000000000000 go.opentelemetry.io/collector/consumer/consumerprofiles v0.109.0 go.opentelemetry.io/collector/consumer/consumertest v0.109.0 go.opentelemetry.io/collector/exporter/exporterprofiles v0.109.0 @@ -88,6 +90,8 @@ replace go.opentelemetry.io/collector/config/configtelemetry => ../config/config replace 
go.opentelemetry.io/collector/consumer/consumerprofiles => ../consumer/consumerprofiles +replace go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles => ../consumer/consumererror/consumererrorprofiles + replace go.opentelemetry.io/collector/consumer/consumertest => ../consumer/consumertest replace go.opentelemetry.io/collector/component/componentstatus => ../component/componentstatus @@ -95,3 +99,5 @@ replace go.opentelemetry.io/collector/component/componentstatus => ../component/ replace go.opentelemetry.io/collector/receiver/receiverprofiles => ../receiver/receiverprofiles replace go.opentelemetry.io/collector/exporter/exporterprofiles => ./exporterprofiles + +replace go.opentelemetry.io/collector/component/componentprofiles => ../component/componentprofiles diff --git a/exporter/loggingexporter/go.mod b/exporter/loggingexporter/go.mod index 02cc24d6b40..ff8640acd34 100644 --- a/exporter/loggingexporter/go.mod +++ b/exporter/loggingexporter/go.mod @@ -40,8 +40,10 @@ require ( github.com/prometheus/common v0.59.1 // indirect github.com/prometheus/procfs v0.15.1 // indirect go.opentelemetry.io/collector v0.109.0 // indirect + go.opentelemetry.io/collector/component/componentprofiles v0.109.0 // indirect go.opentelemetry.io/collector/config/configretry v1.15.0 // indirect go.opentelemetry.io/collector/consumer v0.109.0 // indirect + go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles v0.0.0-00010101000000-000000000000 // indirect go.opentelemetry.io/collector/consumer/consumerprofiles v0.109.0 // indirect go.opentelemetry.io/collector/consumer/consumertest v0.109.0 // indirect go.opentelemetry.io/collector/exporter/exporterprofiles v0.109.0 // indirect @@ -108,3 +110,7 @@ replace go.opentelemetry.io/collector/component/componentstatus => ../../compone replace go.opentelemetry.io/collector/receiver/receiverprofiles => ../../receiver/receiverprofiles replace go.opentelemetry.io/collector/exporter/exporterprofiles => ../exporterprofiles + +replace go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles => ../../consumer/consumererror/consumererrorprofiles + +replace go.opentelemetry.io/collector/component/componentprofiles => ../../component/componentprofiles diff --git a/exporter/nopexporter/go.mod b/exporter/nopexporter/go.mod index 75db7bde02c..880d6344b4e 100644 --- a/exporter/nopexporter/go.mod +++ b/exporter/nopexporter/go.mod @@ -97,3 +97,7 @@ replace go.opentelemetry.io/collector/component/componentstatus => ../../compone replace go.opentelemetry.io/collector/receiver/receiverprofiles => ../../receiver/receiverprofiles replace go.opentelemetry.io/collector/exporter/exporterprofiles => ../exporterprofiles + +replace go.opentelemetry.io/collector/component/componentprofiles => ../../component/componentprofiles + +replace go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles => ../../consumer/consumererror/consumererrorprofiles diff --git a/exporter/otlpexporter/go.mod b/exporter/otlpexporter/go.mod index d76369a57c4..e7a9de3cd66 100644 --- a/exporter/otlpexporter/go.mod +++ b/exporter/otlpexporter/go.mod @@ -7,21 +7,17 @@ require ( go.opentelemetry.io/collector v0.109.0 go.opentelemetry.io/collector/component v0.109.0 go.opentelemetry.io/collector/config/configauth v0.109.0 - go.opentelemetry.io/collector/config/configcompression v1.14.1 go.opentelemetry.io/collector/config/configcompression v1.15.0 go.opentelemetry.io/collector/config/configgrpc v0.109.0 - 
go.opentelemetry.io/collector/config/configopaque v1.14.1 go.opentelemetry.io/collector/config/configopaque v1.15.0 - go.opentelemetry.io/collector/config/configretry v1.14.1 go.opentelemetry.io/collector/config/configretry v1.15.0 - go.opentelemetry.io/collector/config/configtls v1.14.1 go.opentelemetry.io/collector/config/configtls v1.15.0 - go.opentelemetry.io/collector/confmap v1.14.1 go.opentelemetry.io/collector/confmap v1.15.0 go.opentelemetry.io/collector/consumer v0.109.0 go.opentelemetry.io/collector/exporter v0.109.0 - go.opentelemetry.io/collector/pdata v1.14.1 + go.opentelemetry.io/collector/exporter/exporterprofiles v0.109.0 go.opentelemetry.io/collector/pdata v1.15.0 + go.opentelemetry.io/collector/pdata/pprofile v0.109.0 go.opentelemetry.io/collector/pdata/testdata v0.109.0 go.uber.org/goleak v1.3.0 go.uber.org/zap v1.27.0 @@ -61,17 +57,17 @@ require ( github.com/prometheus/common v0.59.1 // indirect github.com/prometheus/procfs v0.15.1 // indirect go.opentelemetry.io/collector/client v1.15.0 // indirect + go.opentelemetry.io/collector/component/componentprofiles v0.109.0 // indirect go.opentelemetry.io/collector/config/confignet v0.109.0 // indirect go.opentelemetry.io/collector/config/configtelemetry v0.109.0 // indirect go.opentelemetry.io/collector/config/internal v0.109.0 // indirect + go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles v0.0.0-00010101000000-000000000000 // indirect go.opentelemetry.io/collector/consumer/consumerprofiles v0.109.0 // indirect go.opentelemetry.io/collector/consumer/consumertest v0.109.0 // indirect - go.opentelemetry.io/collector/exporter/exporterprofiles v0.109.0 // indirect go.opentelemetry.io/collector/extension v0.109.0 // indirect go.opentelemetry.io/collector/extension/auth v0.109.0 // indirect go.opentelemetry.io/collector/extension/experimental/storage v0.109.0 // indirect go.opentelemetry.io/collector/featuregate v1.15.0 // indirect - go.opentelemetry.io/collector/pdata/pprofile v0.109.0 // indirect go.opentelemetry.io/collector/receiver v0.109.0 // indirect go.opentelemetry.io/collector/receiver/receiverprofiles v0.109.0 // indirect go.opentelemetry.io/contrib/config v0.9.0 // indirect @@ -163,6 +159,6 @@ replace go.opentelemetry.io/collector/receiver/receiverprofiles => ../../receive replace go.opentelemetry.io/collector/exporter/exporterprofiles => ../exporterprofiles -replace go.opentelemetry.io/collector/component/componentprofiles => ../../component/componentprofiles - replace go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles => ../../consumer/consumererror/consumererrorprofiles + +replace go.opentelemetry.io/collector/component/componentprofiles => ../../component/componentprofiles diff --git a/exporter/otlphttpexporter/go.mod b/exporter/otlphttpexporter/go.mod index 5a155d8eaef..f158a1a6adb 100644 --- a/exporter/otlphttpexporter/go.mod +++ b/exporter/otlphttpexporter/go.mod @@ -14,7 +14,9 @@ require ( go.opentelemetry.io/collector/confmap v1.15.0 go.opentelemetry.io/collector/consumer v0.109.0 go.opentelemetry.io/collector/exporter v0.109.0 + go.opentelemetry.io/collector/exporter/exporterprofiles v0.109.0 go.opentelemetry.io/collector/pdata v1.15.0 + go.opentelemetry.io/collector/pdata/pprofile v0.109.0 go.uber.org/goleak v1.3.0 go.uber.org/zap v1.27.0 google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd @@ -54,17 +56,17 @@ require ( github.com/prometheus/procfs v0.15.1 // indirect github.com/rs/cors v1.11.1 // indirect 
go.opentelemetry.io/collector/client v1.15.0 // indirect + go.opentelemetry.io/collector/component/componentprofiles v0.109.0 // indirect go.opentelemetry.io/collector/config/configauth v0.109.0 // indirect go.opentelemetry.io/collector/config/configtelemetry v0.109.0 // indirect go.opentelemetry.io/collector/config/internal v0.109.0 // indirect + go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles v0.0.0-00010101000000-000000000000 // indirect go.opentelemetry.io/collector/consumer/consumerprofiles v0.109.0 // indirect go.opentelemetry.io/collector/consumer/consumertest v0.109.0 // indirect - go.opentelemetry.io/collector/exporter/exporterprofiles v0.109.0 // indirect go.opentelemetry.io/collector/extension v0.109.0 // indirect go.opentelemetry.io/collector/extension/auth v0.109.0 // indirect go.opentelemetry.io/collector/extension/experimental/storage v0.109.0 // indirect go.opentelemetry.io/collector/featuregate v1.15.0 // indirect - go.opentelemetry.io/collector/pdata/pprofile v0.109.0 // indirect go.opentelemetry.io/collector/receiver v0.109.0 // indirect go.opentelemetry.io/collector/receiver/receiverprofiles v0.109.0 // indirect go.opentelemetry.io/contrib/config v0.9.0 // indirect @@ -154,6 +156,6 @@ replace go.opentelemetry.io/collector/receiver/receiverprofiles => ../../receive replace go.opentelemetry.io/collector/exporter/exporterprofiles => ../exporterprofiles -replace go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles => ../../consumer/consumererror/consumererrorprofiles - replace go.opentelemetry.io/collector/component/componentprofiles => ../../component/componentprofiles + +replace go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles => ../../consumer/consumererror/consumererrorprofiles diff --git a/internal/e2e/go.mod b/internal/e2e/go.mod index 515b9e89963..097e2383acd 100644 --- a/internal/e2e/go.mod +++ b/internal/e2e/go.mod @@ -77,6 +77,7 @@ require ( go.opentelemetry.io/collector/config/confignet v0.109.0 // indirect go.opentelemetry.io/collector/config/internal v0.109.0 // indirect go.opentelemetry.io/collector/connector/connectorprofiles v0.109.0 // indirect + go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles v0.0.0-00010101000000-000000000000 // indirect go.opentelemetry.io/collector/consumer/consumerprofiles v0.109.0 // indirect go.opentelemetry.io/collector/exporter/exporterprofiles v0.109.0 // indirect go.opentelemetry.io/collector/extension/auth v0.109.0 // indirect @@ -206,3 +207,5 @@ replace go.opentelemetry.io/collector/processor/processorprofiles => ../../proce replace go.opentelemetry.io/collector/connector/connectorprofiles => ../../connector/connectorprofiles replace go.opentelemetry.io/collector/exporter/exporterprofiles => ../../exporter/exporterprofiles + +replace go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles => ../../consumer/consumererror/consumererrorprofiles diff --git a/otelcol/go.mod b/otelcol/go.mod index 9ce0b07ecb8..4c652584169 100644 --- a/otelcol/go.mod +++ b/otelcol/go.mod @@ -178,3 +178,5 @@ replace go.opentelemetry.io/collector/processor/processorprofiles => ../processo replace go.opentelemetry.io/collector/connector/connectorprofiles => ../connector/connectorprofiles replace go.opentelemetry.io/collector/exporter/exporterprofiles => ../exporter/exporterprofiles + +replace go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles => ../consumer/consumererror/consumererrorprofiles diff --git 
a/otelcol/otelcoltest/go.mod b/otelcol/otelcoltest/go.mod index bca3cb4814c..9a2779b4427 100644 --- a/otelcol/otelcoltest/go.mod +++ b/otelcol/otelcoltest/go.mod @@ -193,3 +193,5 @@ replace go.opentelemetry.io/collector/processor/processorprofiles => ../../proce replace go.opentelemetry.io/collector/connector/connectorprofiles => ../../connector/connectorprofiles replace go.opentelemetry.io/collector/exporter/exporterprofiles => ../../exporter/exporterprofiles + +replace go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles => ../../consumer/consumererror/consumererrorprofiles diff --git a/service/go.mod b/service/go.mod index 14591fba060..49dddd51c79 100644 --- a/service/go.mod +++ b/service/go.mod @@ -186,3 +186,5 @@ replace go.opentelemetry.io/collector/processor/processorprofiles => ../processo replace go.opentelemetry.io/collector/connector/connectorprofiles => ../connector/connectorprofiles replace go.opentelemetry.io/collector/exporter/exporterprofiles => ../exporter/exporterprofiles + +replace go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles => ../consumer/consumererror/consumererrorprofiles From 6f56650b93920fd89818d5a7acb7e3471dea4617 Mon Sep 17 00:00:00 2001 From: dmathieu <42@dmathieu.com> Date: Wed, 11 Sep 2024 11:46:29 +0200 Subject: [PATCH 12/18] add changelog entry --- .chloggen/otlpexporter-profiles.yaml | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) create mode 100644 .chloggen/otlpexporter-profiles.yaml diff --git a/.chloggen/otlpexporter-profiles.yaml b/.chloggen/otlpexporter-profiles.yaml new file mode 100644 index 00000000000..ae21e351e91 --- /dev/null +++ b/.chloggen/otlpexporter-profiles.yaml @@ -0,0 +1,25 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. otlpreceiver) +component: otlpexporter + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Support profiles in the OTLP exporters + +# One or more tracking issues or pull requests related to the change +issues: [11131] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. 
+# Default: '[user]' +change_logs: [api] From b6e723ec9a73f7b3f513bf19af4e0f5f4e0dcce1 Mon Sep 17 00:00:00 2001 From: dmathieu <42@dmathieu.com> Date: Thu, 12 Sep 2024 10:40:59 +0200 Subject: [PATCH 13/18] move profiles to development --- exporter/otlpexporter/README.md | 4 +++- exporter/otlpexporter/internal/metadata/generated_status.go | 2 +- exporter/otlpexporter/metadata.yaml | 3 ++- exporter/otlphttpexporter/README.md | 4 +++- .../otlphttpexporter/internal/metadata/generated_status.go | 2 +- exporter/otlphttpexporter/metadata.yaml | 4 ++-- 6 files changed, 12 insertions(+), 7 deletions(-) diff --git a/exporter/otlpexporter/README.md b/exporter/otlpexporter/README.md index 55f0464a2ab..adce2e4ac0b 100644 --- a/exporter/otlpexporter/README.md +++ b/exporter/otlpexporter/README.md @@ -3,11 +3,13 @@ | Status | | | ------------- |-----------| -| Stability | [beta]: logs, profiles | +| Stability | [development]: profiles | +| | [beta]: logs | | | [stable]: traces, metrics | | Distributions | [core], [contrib], [k8s] | | Issues | [![Open issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector?query=is%3Aissue%20is%3Aopen%20label%3Aexporter%2Fotlp%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector/issues?q=is%3Aopen+is%3Aissue+label%3Aexporter%2Fotlp) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector?query=is%3Aissue%20is%3Aclosed%20label%3Aexporter%2Fotlp%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector/issues?q=is%3Aclosed+is%3Aissue+label%3Aexporter%2Fotlp) | +[development]: https://github.com/open-telemetry/opentelemetry-collector#development [beta]: https://github.com/open-telemetry/opentelemetry-collector#beta [stable]: https://github.com/open-telemetry/opentelemetry-collector#stable [core]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol diff --git a/exporter/otlpexporter/internal/metadata/generated_status.go b/exporter/otlpexporter/internal/metadata/generated_status.go index 36f02238361..82f0f448a86 100644 --- a/exporter/otlpexporter/internal/metadata/generated_status.go +++ b/exporter/otlpexporter/internal/metadata/generated_status.go @@ -12,8 +12,8 @@ var ( ) const ( + ProfilesStability = component.StabilityLevelDevelopment LogsStability = component.StabilityLevelBeta - ProfilesStability = component.StabilityLevelBeta TracesStability = component.StabilityLevelStable MetricsStability = component.StabilityLevelStable ) diff --git a/exporter/otlpexporter/metadata.yaml b/exporter/otlpexporter/metadata.yaml index 3ce303e8c8e..ff08c4f0f18 100644 --- a/exporter/otlpexporter/metadata.yaml +++ b/exporter/otlpexporter/metadata.yaml @@ -5,7 +5,8 @@ status: class: exporter stability: stable: [traces, metrics] - beta: [logs, profiles] + beta: [logs] + development: [profiles] distributions: [core, contrib, k8s] tests: diff --git a/exporter/otlphttpexporter/README.md b/exporter/otlphttpexporter/README.md index ea75602eb16..422347d9d6d 100644 --- a/exporter/otlphttpexporter/README.md +++ b/exporter/otlphttpexporter/README.md @@ -3,11 +3,13 @@ | Status | | | ------------- |-----------| -| Stability | [beta]: logs, profiles | +| Stability | [development]: profiles | +| | [beta]: logs | | | [stable]: traces, metrics | | Distributions | [core], [contrib], [k8s] | | Issues | [![Open 
issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector?query=is%3Aissue%20is%3Aopen%20label%3Aexporter%2Fotlphttp%20&label=open&color=orange&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector/issues?q=is%3Aopen+is%3Aissue+label%3Aexporter%2Fotlphttp) [![Closed issues](https://img.shields.io/github/issues-search/open-telemetry/opentelemetry-collector?query=is%3Aissue%20is%3Aclosed%20label%3Aexporter%2Fotlphttp%20&label=closed&color=blue&logo=opentelemetry)](https://github.com/open-telemetry/opentelemetry-collector/issues?q=is%3Aclosed+is%3Aissue+label%3Aexporter%2Fotlphttp) | +[development]: https://github.com/open-telemetry/opentelemetry-collector#development [beta]: https://github.com/open-telemetry/opentelemetry-collector#beta [stable]: https://github.com/open-telemetry/opentelemetry-collector#stable [core]: https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol diff --git a/exporter/otlphttpexporter/internal/metadata/generated_status.go b/exporter/otlphttpexporter/internal/metadata/generated_status.go index fe3678e85e3..07bd3d1d7a6 100644 --- a/exporter/otlphttpexporter/internal/metadata/generated_status.go +++ b/exporter/otlphttpexporter/internal/metadata/generated_status.go @@ -12,8 +12,8 @@ var ( ) const ( + ProfilesStability = component.StabilityLevelDevelopment LogsStability = component.StabilityLevelBeta - ProfilesStability = component.StabilityLevelBeta TracesStability = component.StabilityLevelStable MetricsStability = component.StabilityLevelStable ) diff --git a/exporter/otlphttpexporter/metadata.yaml b/exporter/otlphttpexporter/metadata.yaml index cb86047581c..08d004c45ec 100644 --- a/exporter/otlphttpexporter/metadata.yaml +++ b/exporter/otlphttpexporter/metadata.yaml @@ -5,10 +5,10 @@ status: class: exporter stability: stable: [traces, metrics] - beta: [logs, profiles] + beta: [logs] + development: [profiles] distributions: [core, contrib, k8s] tests: config: endpoint: "https://1.2.3.4:1234" - From c14ae5aba347341a7c1f854ad4b239a96f26b38f Mon Sep 17 00:00:00 2001 From: dmathieu <42@dmathieu.com> Date: Thu, 12 Sep 2024 14:04:20 +0200 Subject: [PATCH 14/18] extract profiles exporter helper into its own module --- Makefile | 2 + cmd/builder/internal/builder/main_test.go | 1 + cmd/otelcorecol/builder-config.yaml | 1 + cmd/otelcorecol/go.mod | 5 +- .../signalerrors_test.go | 4 +- exporter/debugexporter/go.mod | 6 - exporter/exporterhelper/common.go | 300 +-------------- exporter/exporterhelper/constants.go | 4 - exporter/exporterhelper/exporterhelper.go | 6 - .../exporterhelperprofiles/Makefile | 1 + .../exporterhelperprofiles/constants.go | 19 + .../exporterhelperprofiles/go.mod | 94 +++++ .../exporterhelperprofiles/go.sum | 118 ++++++ .../{ => exporterhelperprofiles}/profiles.go | 52 +-- .../profiles_batch.go | 11 +- .../profiles_batch_test.go | 25 +- .../profiles_test.go | 55 +-- .../exporterhelper/internal/base_exporter.go | 341 ++++++++++++++++++ .../base_exporter_test.go} | 40 +- .../{ => internal}/batch_sender.go | 43 +-- .../{ => internal}/batch_sender_test.go | 137 +++---- .../{ => internal}/obsexporter.go | 109 +++--- .../{ => internal}/obsexporter_test.go | 87 +++-- .../exporterhelper/internal/queue_sender.go | 154 ++++++++ .../{ => internal}/queue_sender_test.go | 123 +++---- .../{request_test.go => internal/request.go} | 41 ++- .../exporterhelper/internal/request_sender.go | 33 ++ .../exporterhelper/internal/retry_sender.go | 142 ++++++++ .../{ => 
internal}/retry_sender_test.go | 91 ++--- .../exporterhelper/internal/timeout_sender.go | 52 +++ .../{ => internal}/timeout_sender_test.go | 2 +- exporter/exporterhelper/logs.go | 31 +- exporter/exporterhelper/logs_test.go | 26 +- exporter/exporterhelper/metrics.go | 31 +- exporter/exporterhelper/metrics_test.go | 26 +- exporter/exporterhelper/obsreport_test.go | 15 +- exporter/exporterhelper/queue_sender.go | 133 +------ exporter/exporterhelper/retry_sender.go | 129 +------ exporter/exporterhelper/timeout_sender.go | 43 +-- exporter/exporterhelper/traces.go | 31 +- exporter/exporterhelper/traces_test.go | 26 +- exporter/exporterprofiles/go.mod | 4 - exporter/go.mod | 6 - exporter/loggingexporter/go.mod | 6 - exporter/nopexporter/go.mod | 4 - exporter/otlpexporter/factory.go | 5 +- exporter/otlpexporter/factory_test.go | 2 +- exporter/otlpexporter/go.mod | 3 + exporter/otlpexporter/otlp_test.go | 2 +- exporter/otlphttpexporter/factory.go | 5 +- exporter/otlphttpexporter/go.mod | 3 + internal/e2e/go.mod | 3 + otelcol/go.mod | 2 - otelcol/otelcoltest/go.mod | 2 - service/go.mod | 2 - versions.yaml | 1 + 56 files changed, 1513 insertions(+), 1127 deletions(-) create mode 100644 exporter/exporterhelper/exporterhelperprofiles/Makefile create mode 100644 exporter/exporterhelper/exporterhelperprofiles/constants.go create mode 100644 exporter/exporterhelper/exporterhelperprofiles/go.mod create mode 100644 exporter/exporterhelper/exporterhelperprofiles/go.sum rename exporter/exporterhelper/{ => exporterhelperprofiles}/profiles.go (71%) rename exporter/exporterhelper/{ => exporterhelperprofiles}/profiles_batch.go (88%) rename exporter/exporterhelper/{ => exporterhelperprofiles}/profiles_batch_test.go (89%) rename exporter/exporterhelper/{ => exporterhelperprofiles}/profiles_test.go (86%) create mode 100644 exporter/exporterhelper/internal/base_exporter.go rename exporter/exporterhelper/{common_test.go => internal/base_exporter_test.go} (69%) rename exporter/exporterhelper/{ => internal}/batch_sender.go (81%) rename exporter/exporterhelper/{ => internal}/batch_sender_test.go (83%) rename exporter/exporterhelper/{ => internal}/obsexporter.go (50%) rename exporter/exporterhelper/{ => internal}/obsexporter_test.go (73%) create mode 100644 exporter/exporterhelper/internal/queue_sender.go rename exporter/exporterhelper/{ => internal}/queue_sender_test.go (77%) rename exporter/exporterhelper/{request_test.go => internal/request.go} (67%) create mode 100644 exporter/exporterhelper/internal/request_sender.go create mode 100644 exporter/exporterhelper/internal/retry_sender.go rename exporter/exporterhelper/{ => internal}/retry_sender_test.go (78%) create mode 100644 exporter/exporterhelper/internal/timeout_sender.go rename exporter/exporterhelper/{ => internal}/timeout_sender_test.go (95%) diff --git a/Makefile b/Makefile index 9a18c42cb0d..f0b684f4b08 100644 --- a/Makefile +++ b/Makefile @@ -290,6 +290,7 @@ check-contrib: -replace go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles=$(CURDIR)/consumer/consumererror/consumererrorprofiles \ -replace go.opentelemetry.io/collector/consumer/consumertest=$(CURDIR)/consumer/consumertest \ -replace go.opentelemetry.io/collector/exporter=$(CURDIR)/exporter \ + -replace go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelperprofiles=$(CURDIR)/exporter/exporterhelper/exporterhelperprofiles \ -replace go.opentelemetry.io/collector/exporter/debugexporter=$(CURDIR)/exporter/debugexporter \ -replace 
go.opentelemetry.io/collector/exporter/exporterprofiles=$(CURDIR)/exporter/exporterprofiles \ -replace go.opentelemetry.io/collector/exporter/loggingexporter=$(CURDIR)/exporter/loggingexporter \ @@ -360,6 +361,7 @@ restore-contrib: -dropreplace go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles \ -dropreplace go.opentelemetry.io/collector/consumer/consumertest \ -dropreplace go.opentelemetry.io/collector/exporter \ + -dropreplace go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelperprofiles \ -dropreplace go.opentelemetry.io/collector/exporter/debugexporter \ -dropreplace go.opentelemetry.io/collector/exporter/loggingexporter \ -dropreplace go.opentelemetry.io/collector/exporter/nopexporter \ diff --git a/cmd/builder/internal/builder/main_test.go b/cmd/builder/internal/builder/main_test.go index 7ce5c343036..611f4c6db1e 100644 --- a/cmd/builder/internal/builder/main_test.go +++ b/cmd/builder/internal/builder/main_test.go @@ -69,6 +69,7 @@ var ( "/exporter", "/exporter/debugexporter", "/exporter/exporterprofiles", + "/exporter/exporterhelper/exporterhelperprofiles", "/exporter/nopexporter", "/exporter/otlpexporter", "/exporter/otlphttpexporter", diff --git a/cmd/otelcorecol/builder-config.yaml b/cmd/otelcorecol/builder-config.yaml index 008be1ff917..7907db87620 100644 --- a/cmd/otelcorecol/builder-config.yaml +++ b/cmd/otelcorecol/builder-config.yaml @@ -72,6 +72,7 @@ replaces: - go.opentelemetry.io/collector/exporter => ../../exporter - go.opentelemetry.io/collector/exporter/debugexporter => ../../exporter/debugexporter - go.opentelemetry.io/collector/exporter/exporterprofiles => ../../exporter/exporterprofiles + - go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelperprofiles => ../../exporter/exporterhelper/exporterhelperprofiles - go.opentelemetry.io/collector/exporter/loggingexporter => ../../exporter/loggingexporter - go.opentelemetry.io/collector/exporter/nopexporter => ../../exporter/nopexporter - go.opentelemetry.io/collector/exporter/otlpexporter => ../../exporter/otlpexporter diff --git a/cmd/otelcorecol/go.mod b/cmd/otelcorecol/go.mod index 1b52d479dda..2f1db828d0e 100644 --- a/cmd/otelcorecol/go.mod +++ b/cmd/otelcorecol/go.mod @@ -4,7 +4,7 @@ module go.opentelemetry.io/collector/cmd/otelcorecol go 1.22.0 -toolchain go1.23.1 +toolchain go1.22.7 require ( go.opentelemetry.io/collector/component v0.109.0 @@ -98,6 +98,7 @@ require ( go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles v0.0.0-00010101000000-000000000000 // indirect go.opentelemetry.io/collector/consumer/consumerprofiles v0.109.0 // indirect go.opentelemetry.io/collector/consumer/consumertest v0.109.0 // indirect + go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelperprofiles v0.0.0-00010101000000-000000000000 // indirect go.opentelemetry.io/collector/exporter/exporterprofiles v0.109.0 // indirect go.opentelemetry.io/collector/extension/auth v0.109.0 // indirect go.opentelemetry.io/collector/extension/experimental/storage v0.109.0 // indirect @@ -213,6 +214,8 @@ replace go.opentelemetry.io/collector/exporter/debugexporter => ../../exporter/d replace go.opentelemetry.io/collector/exporter/exporterprofiles => ../../exporter/exporterprofiles +replace go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelperprofiles => ../../exporter/exporterhelper/exporterhelperprofiles + replace go.opentelemetry.io/collector/exporter/loggingexporter => ../../exporter/loggingexporter replace 
go.opentelemetry.io/collector/exporter/nopexporter => ../../exporter/nopexporter diff --git a/consumer/consumererror/consumererrorprofiles/signalerrors_test.go b/consumer/consumererror/consumererrorprofiles/signalerrors_test.go index c9ea6b3ab9d..014040ff1e8 100644 --- a/consumer/consumererror/consumererrorprofiles/signalerrors_test.go +++ b/consumer/consumererror/consumererrorprofiles/signalerrors_test.go @@ -21,7 +21,7 @@ func TestProfiles(t *testing.T) { var target Profiles assert.False(t, errors.As(nil, &target)) assert.False(t, errors.As(err, &target)) - assert.True(t, errors.As(profileErr, &target)) + assert.ErrorAs(t, profileErr, &target) assert.Equal(t, td, target.Data()) } @@ -33,7 +33,7 @@ func TestProfiles_Unwrap(t *testing.T) { target := testErrorType{} require.NotEqual(t, err, target) // Unwrapping profileErr for err and assigning to target. - require.True(t, errors.As(profileErr, &target)) + require.ErrorAs(t, profileErr, &target) require.Equal(t, err, target) } diff --git a/exporter/debugexporter/go.mod b/exporter/debugexporter/go.mod index fa42d996a0a..88f4dd3818f 100644 --- a/exporter/debugexporter/go.mod +++ b/exporter/debugexporter/go.mod @@ -40,9 +40,7 @@ require ( github.com/prometheus/client_model v0.6.1 // indirect github.com/prometheus/common v0.59.1 // indirect github.com/prometheus/procfs v0.15.1 // indirect - go.opentelemetry.io/collector/component/componentprofiles v0.109.0 // indirect go.opentelemetry.io/collector/config/configretry v1.15.0 // indirect - go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles v0.0.0-00010101000000-000000000000 // indirect go.opentelemetry.io/collector/consumer/consumerprofiles v0.109.0 // indirect go.opentelemetry.io/collector/consumer/consumertest v0.109.0 // indirect go.opentelemetry.io/collector/exporter/exporterprofiles v0.109.0 // indirect @@ -93,12 +91,8 @@ replace go.opentelemetry.io/collector/config/configretry => ../../config/configr replace go.opentelemetry.io/collector/consumer/consumerprofiles => ../../consumer/consumerprofiles -replace go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles => ../../consumer/consumererror/consumererrorprofiles - replace go.opentelemetry.io/collector/consumer/consumertest => ../../consumer/consumertest replace go.opentelemetry.io/collector/receiver/receiverprofiles => ../../receiver/receiverprofiles replace go.opentelemetry.io/collector/exporter/exporterprofiles => ../exporterprofiles - -replace go.opentelemetry.io/collector/component/componentprofiles => ../../component/componentprofiles diff --git a/exporter/exporterhelper/common.go b/exporter/exporterhelper/common.go index 0a3238d3561..de9822ee6aa 100644 --- a/exporter/exporterhelper/common.go +++ b/exporter/exporterhelper/common.go @@ -4,115 +4,46 @@ package exporterhelper // import "go.opentelemetry.io/collector/exporter/exporterhelper" import ( - "context" - "fmt" - - "go.uber.org/multierr" - "go.uber.org/zap" - "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/config/configretry" "go.opentelemetry.io/collector/consumer" - "go.opentelemetry.io/collector/exporter" "go.opentelemetry.io/collector/exporter/exporterbatcher" + "go.opentelemetry.io/collector/exporter/exporterhelper/internal" "go.opentelemetry.io/collector/exporter/exporterqueue" ) -// requestSender is an abstraction of a sender for a request independent of the type of the data (traces, metrics, logs). 
-type requestSender interface { - component.Component - send(context.Context, Request) error - setNextSender(nextSender requestSender) -} - -type baseRequestSender struct { - component.StartFunc - component.ShutdownFunc - nextSender requestSender -} - -var _ requestSender = (*baseRequestSender)(nil) - -func (b *baseRequestSender) send(ctx context.Context, req Request) error { - return b.nextSender.send(ctx, req) -} - -func (b *baseRequestSender) setNextSender(nextSender requestSender) { - b.nextSender = nextSender -} - -type obsrepSenderFactory func(obsrep *obsReport) requestSender - -// Option apply changes to baseExporter. -type Option func(*baseExporter) error +// Option apply changes to BaseExporter. +type Option = internal.Option // WithStart overrides the default Start function for an exporter. // The default start function does nothing and always returns nil. func WithStart(start component.StartFunc) Option { - return func(o *baseExporter) error { - o.StartFunc = start - return nil - } + return internal.WithStart(start) } // WithShutdown overrides the default Shutdown function for an exporter. // The default shutdown function does nothing and always returns nil. func WithShutdown(shutdown component.ShutdownFunc) Option { - return func(o *baseExporter) error { - o.ShutdownFunc = shutdown - return nil - } + return internal.WithShutdown(shutdown) } // WithTimeout overrides the default TimeoutConfig for an exporter. // The default TimeoutConfig is 5 seconds. func WithTimeout(timeoutConfig TimeoutConfig) Option { - return func(o *baseExporter) error { - o.timeoutSender.cfg = timeoutConfig - return nil - } + return internal.WithTimeout(timeoutConfig) } // WithRetry overrides the default configretry.BackOffConfig for an exporter. // The default configretry.BackOffConfig is to disable retries. func WithRetry(config configretry.BackOffConfig) Option { - return func(o *baseExporter) error { - if !config.Enabled { - o.exportFailureMessage += " Try enabling retry_on_failure config option to retry on retryable errors." - return nil - } - o.retrySender = newRetrySender(config, o.set) - return nil - } + return internal.WithRetry(config) } // WithQueue overrides the default QueueConfig for an exporter. // The default QueueConfig is to disable queueing. // This option cannot be used with the new exporter helpers New[Traces|Metrics|Logs]RequestExporter. -func WithQueue(config QueueConfig) Option { - return func(o *baseExporter) error { - if o.marshaler == nil || o.unmarshaler == nil { - return fmt.Errorf("WithQueue option is not available for the new request exporters, use WithRequestQueue instead") - } - if !config.Enabled { - o.exportFailureMessage += " Try enabling sending_queue to survive temporary failures." - return nil - } - qf := exporterqueue.NewPersistentQueueFactory[Request](config.StorageID, exporterqueue.PersistentQueueSettings[Request]{ - Marshaler: o.marshaler, - Unmarshaler: o.unmarshaler, - }) - q := qf(context.Background(), exporterqueue.Settings{ - DataType: o.signal, - ExporterSettings: o.set, - }, exporterqueue.Config{ - Enabled: config.Enabled, - NumConsumers: config.NumConsumers, - QueueSize: config.QueueSize, - }) - o.queueSender = newQueueSender(q, o.set, config.NumConsumers, o.exportFailureMessage, o.obsrep) - return nil - } +func WithQueue(config internal.QueueConfig) Option { + return internal.WithQueue(config) } // WithRequestQueue enables queueing for an exporter. 
@@ -120,46 +51,22 @@ func WithQueue(config QueueConfig) Option { // Experimental: This API is at the early stage of development and may change without backward compatibility // until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved. func WithRequestQueue(cfg exporterqueue.Config, queueFactory exporterqueue.Factory[Request]) Option { - return func(o *baseExporter) error { - if o.marshaler != nil || o.unmarshaler != nil { - return fmt.Errorf("WithRequestQueue option must be used with the new request exporters only, use WithQueue instead") - } - if !cfg.Enabled { - o.exportFailureMessage += " Try enabling sending_queue to survive temporary failures." - return nil - } - o.queueCfg = cfg - o.queueFactory = queueFactory - return nil - } + return internal.WithRequestQueue(cfg, queueFactory) } // WithCapabilities overrides the default Capabilities() function for a Consumer. // The default is non-mutable data. // TODO: Verify if we can change the default to be mutable as we do for processors. func WithCapabilities(capabilities consumer.Capabilities) Option { - return func(o *baseExporter) error { - o.consumerOptions = append(o.consumerOptions, consumer.WithCapabilities(capabilities)) - return nil - } + return internal.WithCapabilities(capabilities) } // BatcherOption apply changes to batcher sender. -type BatcherOption func(*batchSender) error +type BatcherOption = internal.BatcherOption // WithRequestBatchFuncs sets the functions for merging and splitting batches for an exporter built for custom request types. func WithRequestBatchFuncs(mf exporterbatcher.BatchMergeFunc[Request], msf exporterbatcher.BatchMergeSplitFunc[Request]) BatcherOption { - return func(bs *batchSender) error { - if mf == nil || msf == nil { - return fmt.Errorf("WithRequestBatchFuncs must be provided with non-nil functions") - } - if bs.mergeFunc != nil || bs.mergeSplitFunc != nil { - return fmt.Errorf("WithRequestBatchFuncs can only be used once with request-based exporters") - } - bs.mergeFunc = mf - bs.mergeSplitFunc = msf - return nil - } + return internal.WithRequestBatchFuncs(mf, msf) } // WithBatcher enables batching for an exporter based on custom request types. @@ -168,184 +75,5 @@ func WithRequestBatchFuncs(mf exporterbatcher.BatchMergeFunc[Request], msf expor // This API is at the early stage of development and may change without backward compatibility // until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved. func WithBatcher(cfg exporterbatcher.Config, opts ...BatcherOption) Option { - return func(o *baseExporter) error { - o.batcherCfg = cfg - o.batcherOpts = opts - return nil - } -} - -// withMarshaler is used to set the request marshaler for the new exporter helper. -// It must be provided as the first option when creating a new exporter helper. -func withMarshaler(marshaler exporterqueue.Marshaler[Request]) Option { - return func(o *baseExporter) error { - o.marshaler = marshaler - return nil - } -} - -// withUnmarshaler is used to set the request unmarshaler for the new exporter helper. -// It must be provided as the first option when creating a new exporter helper. -func withUnmarshaler(unmarshaler exporterqueue.Unmarshaler[Request]) Option { - return func(o *baseExporter) error { - o.unmarshaler = unmarshaler - return nil - } -} - -// withBatchFuncs is used to set the functions for merging and splitting batches for OLTP-based exporters. -// It must be provided as the first option when creating a new exporter helper. 
-func withBatchFuncs(mf exporterbatcher.BatchMergeFunc[Request], msf exporterbatcher.BatchMergeSplitFunc[Request]) Option { - return func(o *baseExporter) error { - o.batchMergeFunc = mf - o.batchMergeSplitfunc = msf - return nil - } -} - -// baseExporter contains common fields between different exporter types. -type baseExporter struct { - component.StartFunc - component.ShutdownFunc - - signal component.DataType - - batchMergeFunc exporterbatcher.BatchMergeFunc[Request] - batchMergeSplitfunc exporterbatcher.BatchMergeSplitFunc[Request] - - marshaler exporterqueue.Marshaler[Request] - unmarshaler exporterqueue.Unmarshaler[Request] - - set exporter.Settings - obsrep *obsReport - - // Message for the user to be added with an export failure message. - exportFailureMessage string - - // Chain of senders that the exporter helper applies before passing the data to the actual exporter. - // The data is handled by each sender in the respective order starting from the queueSender. - // Most of the senders are optional, and initialized with a no-op path-through sender. - batchSender requestSender - queueSender requestSender - obsrepSender requestSender - retrySender requestSender - timeoutSender *timeoutSender // timeoutSender is always initialized. - - consumerOptions []consumer.Option - - queueCfg exporterqueue.Config - queueFactory exporterqueue.Factory[Request] - batcherCfg exporterbatcher.Config - batcherOpts []BatcherOption -} - -func newBaseExporter(set exporter.Settings, signal component.DataType, osf obsrepSenderFactory, options ...Option) (*baseExporter, error) { - obsReport, err := newExporter(obsReportSettings{exporterID: set.ID, exporterCreateSettings: set, dataType: signal}) - if err != nil { - return nil, err - } - - be := &baseExporter{ - signal: signal, - - batchSender: &baseRequestSender{}, - queueSender: &baseRequestSender{}, - obsrepSender: osf(obsReport), - retrySender: &baseRequestSender{}, - timeoutSender: &timeoutSender{cfg: NewDefaultTimeoutConfig()}, - - set: set, - obsrep: obsReport, - } - - for _, op := range options { - err = multierr.Append(err, op(be)) - } - if err != nil { - return nil, err - } - - if be.batcherCfg.Enabled { - bs := newBatchSender(be.batcherCfg, be.set, be.batchMergeFunc, be.batchMergeSplitfunc) - for _, opt := range be.batcherOpts { - err = multierr.Append(err, opt(bs)) - } - if bs.mergeFunc == nil || bs.mergeSplitFunc == nil { - err = multierr.Append(err, fmt.Errorf("WithRequestBatchFuncs must be provided for the batcher applied to the request-based exporters")) - } - be.batchSender = bs - } - - if be.queueCfg.Enabled { - set := exporterqueue.Settings{ - DataType: be.signal, - ExporterSettings: be.set, - } - be.queueSender = newQueueSender(be.queueFactory(context.Background(), set, be.queueCfg), be.set, be.queueCfg.NumConsumers, be.exportFailureMessage, be.obsrep) - for _, op := range options { - err = multierr.Append(err, op(be)) - } - } - - if err != nil { - return nil, err - } - - be.connectSenders() - - if bs, ok := be.batchSender.(*batchSender); ok { - // If queue sender is enabled assign to the batch sender the same number of workers. - if qs, ok := be.queueSender.(*queueSender); ok { - bs.concurrencyLimit = int64(qs.numConsumers) - } - // Batcher sender mutates the data. - be.consumerOptions = append(be.consumerOptions, consumer.WithCapabilities(consumer.Capabilities{MutatesData: true})) - } - - return be, nil -} - -// send sends the request using the first sender in the chain. 
-func (be *baseExporter) send(ctx context.Context, req Request) error { - err := be.queueSender.send(ctx, req) - if err != nil { - be.set.Logger.Error("Exporting failed. Rejecting data."+be.exportFailureMessage, - zap.Error(err), zap.Int("rejected_items", req.ItemsCount())) - } - return err -} - -// connectSenders connects the senders in the predefined order. -func (be *baseExporter) connectSenders() { - be.queueSender.setNextSender(be.batchSender) - be.batchSender.setNextSender(be.obsrepSender) - be.obsrepSender.setNextSender(be.retrySender) - be.retrySender.setNextSender(be.timeoutSender) -} - -func (be *baseExporter) Start(ctx context.Context, host component.Host) error { - // First start the wrapped exporter. - if err := be.StartFunc.Start(ctx, host); err != nil { - return err - } - - // If no error then start the batchSender. - if err := be.batchSender.Start(ctx, host); err != nil { - return err - } - - // Last start the queueSender. - return be.queueSender.Start(ctx, host) -} - -func (be *baseExporter) Shutdown(ctx context.Context) error { - return multierr.Combine( - // First shutdown the retry sender, so the queue sender can flush the queue without retries. - be.retrySender.Shutdown(ctx), - // Then shutdown the batch sender - be.batchSender.Shutdown(ctx), - // Then shutdown the queue sender. - be.queueSender.Shutdown(ctx), - // Last shutdown the wrapped exporter itself. - be.ShutdownFunc.Shutdown(ctx)) + return internal.WithBatcher(cfg, opts...) } diff --git a/exporter/exporterhelper/constants.go b/exporter/exporterhelper/constants.go index 74d7f0a4b7b..57829f08c04 100644 --- a/exporter/exporterhelper/constants.go +++ b/exporter/exporterhelper/constants.go @@ -24,8 +24,4 @@ var ( errNilMetricsConverter = errors.New("nil RequestFromMetricsFunc") // errNilLogsConverter is returned when a nil RequestFromLogsFunc is given. errNilLogsConverter = errors.New("nil RequestFromLogsFunc") - // errNilPushProfileData is returned when a nil PushProfiles is given. - errNilPushProfileData = errors.New("nil PushProfiles") - // errNilProfilesConverter is returned when a nil RequestFromProfilesFunc is given. - errNilProfilesConverter = errors.New("nil RequestFromProfilesFunc") ) diff --git a/exporter/exporterhelper/exporterhelper.go b/exporter/exporterhelper/exporterhelper.go index 0890ec71af1..d9e90d821d9 100644 --- a/exporter/exporterhelper/exporterhelper.go +++ b/exporter/exporterhelper/exporterhelper.go @@ -16,9 +16,3 @@ type Request = internal.Request // Experimental: This API is at the early stage of development and may change without backward compatibility // until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved. type RequestErrorHandler = internal.RequestErrorHandler - -// extractPartialRequest returns a new Request that may contain the items left to be sent -// if only some items failed to process and can be retried. Otherwise, it returns the original Request. 
-func extractPartialRequest(req Request, err error) Request { - return internal.ExtractPartialRequest(req, err) -} diff --git a/exporter/exporterhelper/exporterhelperprofiles/Makefile b/exporter/exporterhelper/exporterhelperprofiles/Makefile new file mode 100644 index 00000000000..bdd863a203b --- /dev/null +++ b/exporter/exporterhelper/exporterhelperprofiles/Makefile @@ -0,0 +1 @@ +include ../../../Makefile.Common diff --git a/exporter/exporterhelper/exporterhelperprofiles/constants.go b/exporter/exporterhelper/exporterhelperprofiles/constants.go new file mode 100644 index 00000000000..528f40eacaf --- /dev/null +++ b/exporter/exporterhelper/exporterhelperprofiles/constants.go @@ -0,0 +1,19 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package exporterhelperprofiles // import "go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelperprofiles" + +import ( + "errors" +) + +var ( + // errNilConfig is returned when an empty name is given. + errNilConfig = errors.New("nil config") + // errNilLogger is returned when a logger is nil + errNilLogger = errors.New("nil logger") + // errNilPushProfileData is returned when a nil PushProfiles is given. + errNilPushProfileData = errors.New("nil PushProfiles") + // errNilProfilesConverter is returned when a nil RequestFromProfilesFunc is given. + errNilProfilesConverter = errors.New("nil RequestFromProfilesFunc") +) diff --git a/exporter/exporterhelper/exporterhelperprofiles/go.mod b/exporter/exporterhelper/exporterhelperprofiles/go.mod new file mode 100644 index 00000000000..842489b5f5a --- /dev/null +++ b/exporter/exporterhelper/exporterhelperprofiles/go.mod @@ -0,0 +1,94 @@ +module go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelperprofiles + +go 1.22.0 + +require ( + github.com/stretchr/testify v1.9.0 + go.opentelemetry.io/collector/component v0.109.0 + go.opentelemetry.io/collector/component/componentprofiles v0.109.0 + go.opentelemetry.io/collector/config/configretry v1.15.0 + go.opentelemetry.io/collector/consumer v0.109.0 + go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles v0.0.0-00010101000000-000000000000 + go.opentelemetry.io/collector/consumer/consumerprofiles v0.109.0 + go.opentelemetry.io/collector/consumer/consumertest v0.109.0 + go.opentelemetry.io/collector/exporter v0.109.0 + go.opentelemetry.io/collector/exporter/exporterprofiles v0.109.0 + go.opentelemetry.io/collector/pdata v1.15.0 + go.opentelemetry.io/collector/pdata/pprofile v0.109.0 + go.opentelemetry.io/collector/pdata/testdata v0.109.0 + go.opentelemetry.io/otel v1.30.0 + go.opentelemetry.io/otel/sdk v1.30.0 + go.opentelemetry.io/otel/trace v1.30.0 + go.uber.org/zap v1.27.0 +) + +require ( + github.com/beorn7/perks v1.0.1 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.17.9 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/prometheus/client_golang v1.20.3 // indirect + 
github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.59.1 // indirect + github.com/prometheus/procfs v0.15.1 // indirect + go.opentelemetry.io/collector/config/configtelemetry v0.109.0 // indirect + go.opentelemetry.io/collector/extension v0.109.0 // indirect + go.opentelemetry.io/collector/extension/experimental/storage v0.109.0 // indirect + go.opentelemetry.io/collector/receiver v0.109.0 // indirect + go.opentelemetry.io/collector/receiver/receiverprofiles v0.109.0 // indirect + go.opentelemetry.io/otel/exporters/prometheus v0.52.0 // indirect + go.opentelemetry.io/otel/metric v1.30.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.30.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/net v0.28.0 // indirect + golang.org/x/sys v0.25.0 // indirect + golang.org/x/text v0.17.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd // indirect + google.golang.org/grpc v1.66.0 // indirect + google.golang.org/protobuf v1.34.2 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) + +replace go.opentelemetry.io/collector/consumer/consumertest => ../../../consumer/consumertest + +replace go.opentelemetry.io/collector/pdata/pprofile => ../../../pdata/pprofile + +replace go.opentelemetry.io/collector/pdata/testdata => ../../../pdata/testdata + +replace go.opentelemetry.io/collector/exporter => ../../ + +replace go.opentelemetry.io/collector/consumer => ../../../consumer + +replace go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles => ../../../consumer/consumererror/consumererrorprofiles + +replace go.opentelemetry.io/collector/receiver => ../../../receiver + +replace go.opentelemetry.io/collector/consumer/consumerprofiles => ../../../consumer/consumerprofiles + +replace go.opentelemetry.io/collector/component => ../../../component + +replace go.opentelemetry.io/collector/receiver/receiverprofiles => ../../../receiver/receiverprofiles + +replace go.opentelemetry.io/collector/extension => ../../../extension + +replace go.opentelemetry.io/collector/pdata => ../../../pdata + +replace go.opentelemetry.io/collector/exporter/exporterprofiles => ../../exporterprofiles + +replace go.opentelemetry.io/collector/config/configtelemetry => ../../../config/configtelemetry + +replace go.opentelemetry.io/collector/config/configretry => ../../../config/configretry + +replace go.opentelemetry.io/collector/component/componentprofiles => ../../../component/componentprofiles + +replace go.opentelemetry.io/collector/extension/experimental/storage => ../../../extension/experimental/storage diff --git a/exporter/exporterhelper/exporterhelperprofiles/go.sum b/exporter/exporterhelper/exporterhelperprofiles/go.sum new file mode 100644 index 00000000000..369679016e1 --- /dev/null +++ b/exporter/exporterhelper/exporterhelperprofiles/go.sum @@ -0,0 +1,118 @@ +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 
h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= +github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v1.20.3 h1:oPksm4K8B+Vt35tUhw6GbSNSgVlVSBH0qELP/7u83l4= +github.com/prometheus/client_golang v1.20.3/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common 
v0.59.1 h1:LXb1quJHWm1P6wq/U824uxYi4Sg0oGvNeUm1z5dJoX0= +github.com/prometheus/common v0.59.1/go.mod h1:GpWM7dewqmVYcd7SmRaiWVe9SSqjf0UrwnYnpEZNuT0= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.opentelemetry.io/otel v1.30.0 h1:F2t8sK4qf1fAmY9ua4ohFS/K+FUuOPemHUIXHtktrts= +go.opentelemetry.io/otel v1.30.0/go.mod h1:tFw4Br9b7fOS+uEao81PJjVMjW/5fvNCbpsDIXqP0pc= +go.opentelemetry.io/otel/exporters/prometheus v0.52.0 h1:kmU3H0b9ufFSi8IQCcxack+sWUblKkFbqWYs6YiACGQ= +go.opentelemetry.io/otel/exporters/prometheus v0.52.0/go.mod h1:+wsAp2+JhuGXX7YRkjlkx6hyWY3ogFPfNA4x3nyiAh0= +go.opentelemetry.io/otel/metric v1.30.0 h1:4xNulvn9gjzo4hjg+wzIKG7iNFEaBMX00Qd4QIZs7+w= +go.opentelemetry.io/otel/metric v1.30.0/go.mod h1:aXTfST94tswhWEb+5QjlSqG+cZlmyXy/u8jFpor3WqQ= +go.opentelemetry.io/otel/sdk v1.30.0 h1:cHdik6irO49R5IysVhdn8oaiR9m8XluDaJAs4DfOrYE= +go.opentelemetry.io/otel/sdk v1.30.0/go.mod h1:p14X4Ok8S+sygzblytT1nqG98QG2KYKv++HE0LY/mhg= +go.opentelemetry.io/otel/sdk/metric v1.30.0 h1:QJLT8Pe11jyHBHfSAgYH7kEmT24eX792jZO1bo4BXkM= +go.opentelemetry.io/otel/sdk/metric v1.30.0/go.mod h1:waS6P3YqFNzeP01kuo/MBBYqaoBJl7efRQHOaydhy1Y= +go.opentelemetry.io/otel/trace v1.30.0 h1:7UBkkYzeg3C7kQX8VAidWh2biiQbtAKjyIML8dQ9wmc= +go.opentelemetry.io/otel/trace v1.30.0/go.mod h1:5EyKqTzzmyqB9bwtCCq6pDLktPK6fmGf/Dph+8VI02o= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod 
h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= +golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= +golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd h1:6TEm2ZxXoQmFWFlt1vNxvVOa1Q0dXFQD1m/rYjXmS0E= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/grpc v1.66.0 h1:DibZuoBznOxbDQxRINckZcUvnCEvrW9pcWIE2yF9r1c= +google.golang.org/grpc v1.66.0/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/exporter/exporterhelper/profiles.go b/exporter/exporterhelper/exporterhelperprofiles/profiles.go similarity index 71% rename from exporter/exporterhelper/profiles.go rename to exporter/exporterhelper/exporterhelperprofiles/profiles.go index 
0c079a64a72..67bf81ace88 100644 --- a/exporter/exporterhelper/profiles.go +++ b/exporter/exporterhelper/exporterhelperprofiles/profiles.go @@ -1,7 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package exporterhelper // import "go.opentelemetry.io/collector/exporter/exporterhelper" +package exporterhelperprofiles // import "go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelperprofiles" import ( "context" @@ -15,6 +15,8 @@ import ( "go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles" "go.opentelemetry.io/collector/consumer/consumerprofiles" "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/exporter/exporterhelper" + "go.opentelemetry.io/collector/exporter/exporterhelper/internal" "go.opentelemetry.io/collector/exporter/exporterprofiles" "go.opentelemetry.io/collector/exporter/exporterqueue" "go.opentelemetry.io/collector/pdata/pprofile" @@ -28,15 +30,15 @@ type profilesRequest struct { pusher consumerprofiles.ConsumeProfilesFunc } -func newProfilesRequest(pd pprofile.Profiles, pusher consumerprofiles.ConsumeProfilesFunc) Request { +func newProfilesRequest(pd pprofile.Profiles, pusher consumerprofiles.ConsumeProfilesFunc) exporterhelper.Request { return &profilesRequest{ pd: pd, pusher: pusher, } } -func newProfileRequestUnmarshalerFunc(pusher consumerprofiles.ConsumeProfilesFunc) exporterqueue.Unmarshaler[Request] { - return func(bytes []byte) (Request, error) { +func newProfileRequestUnmarshalerFunc(pusher consumerprofiles.ConsumeProfilesFunc) exporterqueue.Unmarshaler[exporterhelper.Request] { + return func(bytes []byte) (exporterhelper.Request, error) { profiles, err := profilesUnmarshaler.UnmarshalProfiles(bytes) if err != nil { return nil, err @@ -45,11 +47,11 @@ func newProfileRequestUnmarshalerFunc(pusher consumerprofiles.ConsumeProfilesFun } } -func profilesRequestMarshaler(req Request) ([]byte, error) { +func profilesRequestMarshaler(req exporterhelper.Request) ([]byte, error) { return profilesMarshaler.MarshalProfiles(req.(*profilesRequest).pd) } -func (req *profilesRequest) OnError(err error) Request { +func (req *profilesRequest) OnError(err error) exporterhelper.Request { var profileError consumererrorprofiles.Profiles if errors.As(err, &profileError) { return newProfilesRequest(profileError.Data(), req.pusher) @@ -66,7 +68,7 @@ func (req *profilesRequest) ItemsCount() int { } type profileExporter struct { - *baseExporter + *internal.BaseExporter consumerprofiles.Profiles } @@ -76,7 +78,7 @@ func NewProfilesExporter( set exporter.Settings, cfg component.Config, pusher consumerprofiles.ConsumeProfilesFunc, - options ...Option, + options ...exporterhelper.Option, ) (exporterprofiles.Profiles, error) { if cfg == nil { return nil, errNilConfig @@ -84,9 +86,9 @@ func NewProfilesExporter( if pusher == nil { return nil, errNilPushProfileData } - profilesOpts := []Option{ - withMarshaler(profilesRequestMarshaler), withUnmarshaler(newProfileRequestUnmarshalerFunc(pusher)), - withBatchFuncs(mergeProfiles, mergeSplitProfiles), + profilesOpts := []exporterhelper.Option{ + internal.WithMarshaler(profilesRequestMarshaler), internal.WithUnmarshaler(newProfileRequestUnmarshalerFunc(pusher)), + internal.WithBatchFuncs(mergeProfiles, mergeSplitProfiles), } return NewProfilesRequestExporter(ctx, set, requestFromProfiles(pusher), append(profilesOpts, options...)...) } @@ -94,11 +96,11 @@ func NewProfilesExporter( // RequestFromProfilesFunc converts pprofile.Profiles into a user-defined Request. 
// Experimental: This API is at the early stage of development and may change without backward compatibility // until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved. -type RequestFromProfilesFunc func(context.Context, pprofile.Profiles) (Request, error) +type RequestFromProfilesFunc func(context.Context, pprofile.Profiles) (exporterhelper.Request, error) // requestFromProfiles returns a RequestFromProfilesFunc that converts pprofile.Profiles into a Request. func requestFromProfiles(pusher consumerprofiles.ConsumeProfilesFunc) RequestFromProfilesFunc { - return func(_ context.Context, profiles pprofile.Profiles) (Request, error) { + return func(_ context.Context, profiles pprofile.Profiles) (exporterhelper.Request, error) { return newProfilesRequest(profiles, pusher), nil } } @@ -110,7 +112,7 @@ func NewProfilesRequestExporter( _ context.Context, set exporter.Settings, converter RequestFromProfilesFunc, - options ...Option, + options ...exporterhelper.Option, ) (exporterprofiles.Profiles, error) { if set.Logger == nil { return nil, errNilLogger @@ -120,7 +122,7 @@ func NewProfilesRequestExporter( return nil, errNilProfilesConverter } - be, err := newBaseExporter(set, componentprofiles.DataTypeProfiles, newProfilesExporterWithObservability, options...) + be, err := internal.NewBaseExporter(set, componentprofiles.DataTypeProfiles, newProfilesExporterWithObservability, options...) if err != nil { return nil, err } @@ -133,29 +135,29 @@ func NewProfilesRequestExporter( zap.Error(err)) return consumererror.NewPermanent(cErr) } - return be.send(ctx, req) - }, be.consumerOptions...) + return be.Send(ctx, req) + }, be.ConsumerOptions...) return &profileExporter{ - baseExporter: be, + BaseExporter: be, Profiles: tc, }, err } type profilesExporterWithObservability struct { - baseRequestSender - obsrep *obsReport + internal.BaseRequestSender + obsrep *internal.ObsReport } -func newProfilesExporterWithObservability(obsrep *obsReport) requestSender { +func newProfilesExporterWithObservability(obsrep *internal.ObsReport) internal.RequestSender { return &profilesExporterWithObservability{obsrep: obsrep} } -func (tewo *profilesExporterWithObservability) send(ctx context.Context, req Request) error { - c := tewo.obsrep.startProfilesOp(ctx) +func (tewo *profilesExporterWithObservability) Send(ctx context.Context, req exporterhelper.Request) error { + c := tewo.obsrep.StartProfilesOp(ctx) numSamples := req.ItemsCount() // Forward the data to the next consumer (this pusher is the next). 
- err := tewo.nextSender.send(c, req) - tewo.obsrep.endProfilesOp(c, numSamples, err) + err := tewo.NextSender.Send(c, req) + tewo.obsrep.EndProfilesOp(c, numSamples, err) return err } diff --git a/exporter/exporterhelper/profiles_batch.go b/exporter/exporterhelper/exporterhelperprofiles/profiles_batch.go similarity index 88% rename from exporter/exporterhelper/profiles_batch.go rename to exporter/exporterhelper/exporterhelperprofiles/profiles_batch.go index 7c528295ab2..0db7d879e20 100644 --- a/exporter/exporterhelper/profiles_batch.go +++ b/exporter/exporterhelper/exporterhelperprofiles/profiles_batch.go @@ -1,18 +1,19 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package exporterhelper // import "go.opentelemetry.io/collector/exporter/exporterhelper" +package exporterhelperprofiles // import "go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelperprofiles" import ( "context" "errors" "go.opentelemetry.io/collector/exporter/exporterbatcher" + "go.opentelemetry.io/collector/exporter/exporterhelper" "go.opentelemetry.io/collector/pdata/pprofile" ) // mergeProfiles merges two profiles requests into one. -func mergeProfiles(_ context.Context, r1 Request, r2 Request) (Request, error) { +func mergeProfiles(_ context.Context, r1 exporterhelper.Request, r2 exporterhelper.Request) (exporterhelper.Request, error) { tr1, ok1 := r1.(*profilesRequest) tr2, ok2 := r2.(*profilesRequest) if !ok1 || !ok2 { @@ -23,13 +24,13 @@ func mergeProfiles(_ context.Context, r1 Request, r2 Request) (Request, error) { } // mergeSplitProfiles splits and/or merges the profiles into multiple requests based on the MaxSizeConfig. -func mergeSplitProfiles(_ context.Context, cfg exporterbatcher.MaxSizeConfig, r1 Request, r2 Request) ([]Request, error) { +func mergeSplitProfiles(_ context.Context, cfg exporterbatcher.MaxSizeConfig, r1 exporterhelper.Request, r2 exporterhelper.Request) ([]exporterhelper.Request, error) { var ( - res []Request + res []exporterhelper.Request destReq *profilesRequest capacityLeft = cfg.MaxSizeItems ) - for _, req := range []Request{r1, r2} { + for _, req := range []exporterhelper.Request{r1, r2} { if req == nil { continue } diff --git a/exporter/exporterhelper/profiles_batch_test.go b/exporter/exporterhelper/exporterhelperprofiles/profiles_batch_test.go similarity index 89% rename from exporter/exporterhelper/profiles_batch_test.go rename to exporter/exporterhelper/exporterhelperprofiles/profiles_batch_test.go index 97e03120f75..7a62fda9ece 100644 --- a/exporter/exporterhelper/profiles_batch_test.go +++ b/exporter/exporterhelper/exporterhelperprofiles/profiles_batch_test.go @@ -1,16 +1,21 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package exporterhelper +package exporterhelperprofiles import ( "context" + "fmt" + "os" "testing" "github.com/stretchr/testify/assert" + "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/exporter/exporterbatcher" + "go.opentelemetry.io/collector/exporter/exporterhelper" "go.opentelemetry.io/collector/pdata/pprofile" + "go.opentelemetry.io/collector/pdata/ptrace" "go.opentelemetry.io/collector/pdata/testdata" ) @@ -19,6 +24,7 @@ func TestMergeProfiles(t *testing.T) { pr2 := &profilesRequest{pd: testdata.GenerateProfiles(3)} res, err := mergeProfiles(context.Background(), pr1, pr2) assert.NoError(t, err) + fmt.Fprintf(os.Stdout, "%#v\n", res.(*profilesRequest).pd) assert.Equal(t, 5, res.(*profilesRequest).pd.SampleCount()) } @@ -33,8 +39,8 @@ func 
TestMergeSplitProfiles(t *testing.T) { tests := []struct { name string cfg exporterbatcher.MaxSizeConfig - pr1 Request - pr2 Request + pr1 exporterhelper.Request + pr2 exporterhelper.Request expected []*profilesRequest }{ { @@ -152,3 +158,16 @@ func TestExtractProfiles(t *testing.T) { assert.Equal(t, 10-i, ld.SampleCount()) } } + +type tracesRequest struct { + td ptrace.Traces + pusher consumer.ConsumeTracesFunc +} + +func (req *tracesRequest) Export(ctx context.Context) error { + return req.pusher(ctx, req.td) +} + +func (req *tracesRequest) ItemsCount() int { + return req.td.SpanCount() +} diff --git a/exporter/exporterhelper/profiles_test.go b/exporter/exporterhelper/exporterhelperprofiles/profiles_test.go similarity index 86% rename from exporter/exporterhelper/profiles_test.go rename to exporter/exporterhelper/exporterhelperprofiles/profiles_test.go index 0c71a4be7c6..ebe3003fe7f 100644 --- a/exporter/exporterhelper/profiles_test.go +++ b/exporter/exporterhelper/exporterhelperprofiles/profiles_test.go @@ -1,7 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package exporterhelper +package exporterhelperprofiles import ( "context" @@ -27,6 +27,7 @@ import ( "go.opentelemetry.io/collector/consumer/consumerprofiles" "go.opentelemetry.io/collector/consumer/consumertest" "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/exporter/exporterhelper" "go.opentelemetry.io/collector/exporter/exporterhelper/internal" "go.opentelemetry.io/collector/exporter/exporterprofiles" "go.opentelemetry.io/collector/exporter/exportertest" @@ -50,7 +51,7 @@ func TestProfilesRequest(t *testing.T) { assert.EqualValues( t, newProfilesRequest(pprofile.NewProfiles(), nil), - lr.(RequestErrorHandler).OnError(profileErr), + lr.(exporterhelper.RequestErrorHandler).OnError(profileErr), ) } @@ -67,7 +68,7 @@ func TestProfilesExporter_NilLogger(t *testing.T) { } func TestProfilesRequestExporter_NilLogger(t *testing.T) { - le, err := NewProfilesRequestExporter(context.Background(), exporter.Settings{}, (&fakeRequestConverter{}).requestFromProfilesFunc) + le, err := NewProfilesRequestExporter(context.Background(), exporter.Settings{}, (&internal.FakeRequestConverter{}).RequestFromProfilesFunc) require.Nil(t, le) require.Equal(t, errNilLogger, err) } @@ -99,7 +100,7 @@ func TestProfilesExporter_Default(t *testing.T) { func TestProfilesRequestExporter_Default(t *testing.T) { ld := pprofile.NewProfiles() le, err := NewProfilesRequestExporter(context.Background(), exportertest.NewNopSettings(), - (&fakeRequestConverter{}).requestFromProfilesFunc) + (&internal.FakeRequestConverter{}).RequestFromProfilesFunc) assert.NotNil(t, le) assert.NoError(t, err) @@ -111,7 +112,7 @@ func TestProfilesRequestExporter_Default(t *testing.T) { func TestProfilesExporter_WithCapabilities(t *testing.T) { capabilities := consumer.Capabilities{MutatesData: true} - le, err := NewProfilesExporter(context.Background(), exportertest.NewNopSettings(), &fakeProfilesExporterConfig, newPushProfilesData(nil), WithCapabilities(capabilities)) + le, err := NewProfilesExporter(context.Background(), exportertest.NewNopSettings(), &fakeProfilesExporterConfig, newPushProfilesData(nil), exporterhelper.WithCapabilities(capabilities)) require.NoError(t, err) require.NotNil(t, le) @@ -121,7 +122,7 @@ func TestProfilesExporter_WithCapabilities(t *testing.T) { func TestProfilesRequestExporter_WithCapabilities(t *testing.T) { capabilities := consumer.Capabilities{MutatesData: true} le, err := 
NewProfilesRequestExporter(context.Background(), exportertest.NewNopSettings(), - (&fakeRequestConverter{}).requestFromProfilesFunc, WithCapabilities(capabilities)) + (&internal.FakeRequestConverter{}).RequestFromProfilesFunc, exporterhelper.WithCapabilities(capabilities)) require.NoError(t, err) require.NotNil(t, le) @@ -141,7 +142,7 @@ func TestProfilesRequestExporter_Default_ConvertError(t *testing.T) { ld := pprofile.NewProfiles() want := errors.New("convert_error") le, err := NewProfilesRequestExporter(context.Background(), exportertest.NewNopSettings(), - (&fakeRequestConverter{profilesError: want}).requestFromProfilesFunc) + (&internal.FakeRequestConverter{ProfilesError: want}).RequestFromProfilesFunc) require.NoError(t, err) require.NotNil(t, le) require.Equal(t, consumererror.NewPermanent(want), le.ConsumeProfiles(context.Background(), ld)) @@ -151,24 +152,24 @@ func TestProfilesRequestExporter_Default_ExportError(t *testing.T) { ld := pprofile.NewProfiles() want := errors.New("export_error") le, err := NewProfilesRequestExporter(context.Background(), exportertest.NewNopSettings(), - (&fakeRequestConverter{requestError: want}).requestFromProfilesFunc) + (&internal.FakeRequestConverter{RequestError: want}).RequestFromProfilesFunc) require.NoError(t, err) require.NotNil(t, le) require.Equal(t, want, le.ConsumeProfiles(context.Background(), ld)) } func TestProfilesExporter_WithPersistentQueue(t *testing.T) { - qCfg := NewDefaultQueueSettings() + qCfg := exporterhelper.NewDefaultQueueConfig() storageID := component.MustNewIDWithName("file_storage", "storage") qCfg.StorageID = &storageID rCfg := configretry.NewDefaultBackOffConfig() ts := consumertest.ProfilesSink{} set := exportertest.NewNopSettings() set.ID = component.MustNewIDWithName("test_profiles", "with_persistent_queue") - te, err := NewProfilesExporter(context.Background(), set, &fakeProfilesExporterConfig, ts.ConsumeProfiles, WithRetry(rCfg), WithQueue(qCfg)) + te, err := NewProfilesExporter(context.Background(), set, &fakeProfilesExporterConfig, ts.ConsumeProfiles, exporterhelper.WithRetry(rCfg), exporterhelper.WithQueue(qCfg)) require.NoError(t, err) - host := &mockHost{ext: map[component.ID]component.Component{ + host := &internal.MockHost{Ext: map[component.ID]component.Component{ storageID: queue.NewMockStorageExtension(nil), }} require.NoError(t, te.Start(context.Background(), host)) @@ -201,7 +202,7 @@ func TestProfilesRequestExporter_WithSpan(t *testing.T) { otel.SetTracerProvider(set.TracerProvider) defer otel.SetTracerProvider(nooptrace.NewTracerProvider()) - le, err := NewProfilesRequestExporter(context.Background(), set, (&fakeRequestConverter{}).requestFromProfilesFunc) + le, err := NewProfilesRequestExporter(context.Background(), set, (&internal.FakeRequestConverter{}).RequestFromProfilesFunc) require.NoError(t, err) require.NotNil(t, le) checkWrapSpanForProfilesExporter(t, sr, set.TracerProvider.Tracer("test"), le, nil, 1) @@ -229,7 +230,7 @@ func TestProfilesRequestExporter_WithSpan_ReturnError(t *testing.T) { defer otel.SetTracerProvider(nooptrace.NewTracerProvider()) want := errors.New("my_error") - le, err := NewProfilesRequestExporter(context.Background(), set, (&fakeRequestConverter{requestError: want}).requestFromProfilesFunc) + le, err := NewProfilesRequestExporter(context.Background(), set, (&internal.FakeRequestConverter{RequestError: want}).RequestFromProfilesFunc) require.NoError(t, err) require.NotNil(t, le) checkWrapSpanForProfilesExporter(t, sr, set.TracerProvider.Tracer("test"), le, want, 1) @@ 
-239,7 +240,7 @@ func TestProfilesExporter_WithShutdown(t *testing.T) { shutdownCalled := false shutdown := func(context.Context) error { shutdownCalled = true; return nil } - le, err := NewProfilesExporter(context.Background(), exportertest.NewNopSettings(), &fakeProfilesExporterConfig, newPushProfilesData(nil), WithShutdown(shutdown)) + le, err := NewProfilesExporter(context.Background(), exportertest.NewNopSettings(), &fakeProfilesExporterConfig, newPushProfilesData(nil), exporterhelper.WithShutdown(shutdown)) assert.NotNil(t, le) assert.NoError(t, err) @@ -252,7 +253,7 @@ func TestProfilesRequestExporter_WithShutdown(t *testing.T) { shutdown := func(context.Context) error { shutdownCalled = true; return nil } le, err := NewProfilesRequestExporter(context.Background(), exportertest.NewNopSettings(), - (&fakeRequestConverter{}).requestFromProfilesFunc, WithShutdown(shutdown)) + (&internal.FakeRequestConverter{}).RequestFromProfilesFunc, exporterhelper.WithShutdown(shutdown)) assert.NotNil(t, le) assert.NoError(t, err) @@ -264,11 +265,11 @@ func TestProfilesExporter_WithShutdown_ReturnError(t *testing.T) { want := errors.New("my_error") shutdownErr := func(context.Context) error { return want } - le, err := NewProfilesExporter(context.Background(), exportertest.NewNopSettings(), &fakeProfilesExporterConfig, newPushProfilesData(nil), WithShutdown(shutdownErr)) + le, err := NewProfilesExporter(context.Background(), exportertest.NewNopSettings(), &fakeProfilesExporterConfig, newPushProfilesData(nil), exporterhelper.WithShutdown(shutdownErr)) assert.NotNil(t, le) assert.NoError(t, err) - assert.Equal(t, le.Shutdown(context.Background()), want) + assert.Equal(t, want, le.Shutdown(context.Background())) } func TestProfilesRequestExporter_WithShutdown_ReturnError(t *testing.T) { @@ -276,11 +277,11 @@ func TestProfilesRequestExporter_WithShutdown_ReturnError(t *testing.T) { shutdownErr := func(context.Context) error { return want } le, err := NewProfilesRequestExporter(context.Background(), exportertest.NewNopSettings(), - (&fakeRequestConverter{}).requestFromProfilesFunc, WithShutdown(shutdownErr)) + (&internal.FakeRequestConverter{}).RequestFromProfilesFunc, exporterhelper.WithShutdown(shutdownErr)) assert.NotNil(t, le) assert.NoError(t, err) - assert.Equal(t, le.Shutdown(context.Background()), want) + assert.Equal(t, want, le.Shutdown(context.Background())) } func newPushProfilesData(retError error) consumerprofiles.ConsumeProfilesFunc { @@ -299,7 +300,7 @@ func generateProfilesTraffic(t *testing.T, tracer trace.Tracer, le exporterprofi } func checkWrapSpanForProfilesExporter(t *testing.T, sr *tracetest.SpanRecorder, tracer trace.Tracer, le exporterprofiles.Profiles, - wantError error, numProfileRecords int64) { // nolint: unparam + wantError error, numSampleRecords int64) { // nolint: unparam const numRequests = 5 generateProfilesTraffic(t, tracer, le, numRequests, wantError) @@ -311,15 +312,15 @@ func checkWrapSpanForProfilesExporter(t *testing.T, sr *tracetest.SpanRecorder, require.Equalf(t, fakeProfilesParentSpanName, parentSpan.Name(), "SpanData %v", parentSpan) for _, sd := range gotSpanData[:numRequests] { require.Equalf(t, parentSpan.SpanContext(), sd.Parent(), "Exporter span not a child\nSpanData %v", sd) - checkStatus(t, sd, wantError) + internal.CheckStatus(t, sd, wantError) - sentProfileRecords := numProfileRecords - var failedToSendProfileRecords int64 + sentSampleRecords := numSampleRecords + var failedToSendSampleRecords int64 if wantError != nil { - sentProfileRecords = 0 - 
failedToSendProfileRecords = numProfileRecords
+ sentSampleRecords = 0
+ failedToSendSampleRecords = numSampleRecords
}
- require.Containsf(t, sd.Attributes(), attribute.KeyValue{Key: internal.SentSamplesKey, Value: attribute.Int64Value(sentProfileRecords)}, "SpanData %v", sd)
- require.Containsf(t, sd.Attributes(), attribute.KeyValue{Key: internal.FailedToSendSamplesKey, Value: attribute.Int64Value(failedToSendProfileRecords)}, "SpanData %v", sd)
+ require.Containsf(t, sd.Attributes(), attribute.KeyValue{Key: internal.SentSamplesKey, Value: attribute.Int64Value(sentSampleRecords)}, "SpanData %v", sd)
+ require.Containsf(t, sd.Attributes(), attribute.KeyValue{Key: internal.FailedToSendSamplesKey, Value: attribute.Int64Value(failedToSendSampleRecords)}, "SpanData %v", sd)
}
}
diff --git a/exporter/exporterhelper/internal/base_exporter.go b/exporter/exporterhelper/internal/base_exporter.go
new file mode 100644
index 00000000000..922dbc9b34a
--- /dev/null
+++ b/exporter/exporterhelper/internal/base_exporter.go
@@ -0,0 +1,341 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package internal // import "go.opentelemetry.io/collector/exporter/exporterhelper/internal"
+
+import (
+ "context"
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+ "go.opentelemetry.io/otel/codes"
+ sdktrace "go.opentelemetry.io/otel/sdk/trace"
+ "go.uber.org/multierr"
+ "go.uber.org/zap"
+
+ "go.opentelemetry.io/collector/component"
+ "go.opentelemetry.io/collector/config/configretry"
+ "go.opentelemetry.io/collector/consumer"
+ "go.opentelemetry.io/collector/exporter"
+ "go.opentelemetry.io/collector/exporter/exporterbatcher"
+ "go.opentelemetry.io/collector/exporter/exporterqueue"
+ "go.opentelemetry.io/collector/exporter/internal"
+)
+
+type ObsrepSenderFactory = func(obsrep *ObsReport) RequestSender
+
+// Option applies changes to BaseExporter.
+type Option func(*BaseExporter) error
+
+// BatcherOption applies changes to the batcher sender.
+type BatcherOption func(*BatchSender) error
+
+// BaseExporter contains common fields between different exporter types.
+type BaseExporter struct {
+ component.StartFunc
+ component.ShutdownFunc
+
+ Signal component.DataType
+
+ BatchMergeFunc exporterbatcher.BatchMergeFunc[internal.Request]
+ BatchMergeSplitfunc exporterbatcher.BatchMergeSplitFunc[internal.Request]
+
+ Marshaler exporterqueue.Marshaler[internal.Request]
+ Unmarshaler exporterqueue.Unmarshaler[internal.Request]
+
+ Set exporter.Settings
+ Obsrep *ObsReport
+
+ // Message for the user to be added with an export failure message.
+ ExportFailureMessage string
+
+ // Chain of senders that the exporter helper applies before passing the data to the actual exporter.
+ // The data is handled by each sender in the respective order starting from the queueSender.
+ // Most of the senders are optional, and initialized with a no-op pass-through sender.
+ BatchSender RequestSender
+ QueueSender RequestSender
+ ObsrepSender RequestSender
+ RetrySender RequestSender
+ TimeoutSender *TimeoutSender // TimeoutSender is always initialized.
+ + ConsumerOptions []consumer.Option + + QueueCfg exporterqueue.Config + QueueFactory exporterqueue.Factory[internal.Request] + BatcherCfg exporterbatcher.Config + BatcherOpts []BatcherOption +} + +func NewBaseExporter(set exporter.Settings, signal component.DataType, osf ObsrepSenderFactory, options ...Option) (*BaseExporter, error) { + obsReport, err := NewExporter(ObsReportSettings{ExporterID: set.ID, ExporterCreateSettings: set, DataType: signal}) + if err != nil { + return nil, err + } + + be := &BaseExporter{ + Signal: signal, + + BatchSender: &BaseRequestSender{}, + QueueSender: &BaseRequestSender{}, + ObsrepSender: osf(obsReport), + RetrySender: &BaseRequestSender{}, + TimeoutSender: &TimeoutSender{cfg: NewDefaultTimeoutConfig()}, + + Set: set, + Obsrep: obsReport, + } + + for _, op := range options { + err = multierr.Append(err, op(be)) + } + if err != nil { + return nil, err + } + + if be.BatcherCfg.Enabled { + bs := NewBatchSender(be.BatcherCfg, be.Set, be.BatchMergeFunc, be.BatchMergeSplitfunc) + for _, opt := range be.BatcherOpts { + err = multierr.Append(err, opt(bs)) + } + if bs.mergeFunc == nil || bs.mergeSplitFunc == nil { + err = multierr.Append(err, fmt.Errorf("WithRequestBatchFuncs must be provided for the batcher applied to the request-based exporters")) + } + be.BatchSender = bs + } + + if be.QueueCfg.Enabled { + set := exporterqueue.Settings{ + DataType: be.Signal, + ExporterSettings: be.Set, + } + be.QueueSender = NewQueueSender(be.QueueFactory(context.Background(), set, be.QueueCfg), be.Set, be.QueueCfg.NumConsumers, be.ExportFailureMessage, be.Obsrep) + for _, op := range options { + err = multierr.Append(err, op(be)) + } + } + + if err != nil { + return nil, err + } + + be.connectSenders() + + if bs, ok := be.BatchSender.(*BatchSender); ok { + // If queue sender is enabled assign to the batch sender the same number of workers. + if qs, ok := be.QueueSender.(*QueueSender); ok { + bs.concurrencyLimit = int64(qs.numConsumers) + } + // Batcher sender mutates the data. + be.ConsumerOptions = append(be.ConsumerOptions, consumer.WithCapabilities(consumer.Capabilities{MutatesData: true})) + } + + return be, nil +} + +// send sends the request using the first sender in the chain. +func (be *BaseExporter) Send(ctx context.Context, req internal.Request) error { + err := be.QueueSender.Send(ctx, req) + if err != nil { + be.Set.Logger.Error("Exporting failed. Rejecting data."+be.ExportFailureMessage, + zap.Error(err), zap.Int("rejected_items", req.ItemsCount())) + } + return err +} + +// connectSenders connects the senders in the predefined order. +func (be *BaseExporter) connectSenders() { + be.QueueSender.SetNextSender(be.BatchSender) + be.BatchSender.SetNextSender(be.ObsrepSender) + be.ObsrepSender.SetNextSender(be.RetrySender) + be.RetrySender.SetNextSender(be.TimeoutSender) +} + +func (be *BaseExporter) Start(ctx context.Context, host component.Host) error { + // First start the wrapped exporter. + if err := be.StartFunc.Start(ctx, host); err != nil { + return err + } + + // If no error then start the BatchSender. + if err := be.BatchSender.Start(ctx, host); err != nil { + return err + } + + // Last start the queueSender. + return be.QueueSender.Start(ctx, host) +} + +func (be *BaseExporter) Shutdown(ctx context.Context) error { + return multierr.Combine( + // First shutdown the retry sender, so the queue sender can flush the queue without retries. 
+ be.RetrySender.Shutdown(ctx), + // Then shutdown the batch sender + be.BatchSender.Shutdown(ctx), + // Then shutdown the queue sender. + be.QueueSender.Shutdown(ctx), + // Last shutdown the wrapped exporter itself. + be.ShutdownFunc.Shutdown(ctx)) +} + +// WithStart overrides the default Start function for an exporter. +// The default start function does nothing and always returns nil. +func WithStart(start component.StartFunc) Option { + return func(o *BaseExporter) error { + o.StartFunc = start + return nil + } +} + +// WithShutdown overrides the default Shutdown function for an exporter. +// The default shutdown function does nothing and always returns nil. +func WithShutdown(shutdown component.ShutdownFunc) Option { + return func(o *BaseExporter) error { + o.ShutdownFunc = shutdown + return nil + } +} + +// WithTimeout overrides the default TimeoutConfig for an exporter. +// The default TimeoutConfig is 5 seconds. +func WithTimeout(timeoutConfig TimeoutConfig) Option { + return func(o *BaseExporter) error { + o.TimeoutSender.cfg = timeoutConfig + return nil + } +} + +// WithRetry overrides the default configretry.BackOffConfig for an exporter. +// The default configretry.BackOffConfig is to disable retries. +func WithRetry(config configretry.BackOffConfig) Option { + return func(o *BaseExporter) error { + if !config.Enabled { + o.ExportFailureMessage += " Try enabling retry_on_failure config option to retry on retryable errors." + return nil + } + o.RetrySender = newRetrySender(config, o.Set) + return nil + } +} + +// WithQueue overrides the default QueueConfig for an exporter. +// The default QueueConfig is to disable queueing. +// This option cannot be used with the new exporter helpers New[Traces|Metrics|Logs]RequestExporter. +func WithQueue(config QueueConfig) Option { + return func(o *BaseExporter) error { + if o.Marshaler == nil || o.Unmarshaler == nil { + return fmt.Errorf("WithQueue option is not available for the new request exporters, use WithRequestQueue instead") + } + if !config.Enabled { + o.ExportFailureMessage += " Try enabling sending_queue to survive temporary failures." + return nil + } + qf := exporterqueue.NewPersistentQueueFactory[internal.Request](config.StorageID, exporterqueue.PersistentQueueSettings[internal.Request]{ + Marshaler: o.Marshaler, + Unmarshaler: o.Unmarshaler, + }) + q := qf(context.Background(), exporterqueue.Settings{ + DataType: o.Signal, + ExporterSettings: o.Set, + }, exporterqueue.Config{ + Enabled: config.Enabled, + NumConsumers: config.NumConsumers, + QueueSize: config.QueueSize, + }) + o.QueueSender = NewQueueSender(q, o.Set, config.NumConsumers, o.ExportFailureMessage, o.Obsrep) + return nil + } +} + +// WithRequestQueue enables queueing for an exporter. +// This option should be used with the new exporter helpers New[Traces|Metrics|Logs]RequestExporter. +// Experimental: This API is at the early stage of development and may change without backward compatibility +// until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved. +func WithRequestQueue(cfg exporterqueue.Config, queueFactory exporterqueue.Factory[internal.Request]) Option { + return func(o *BaseExporter) error { + if o.Marshaler != nil || o.Unmarshaler != nil { + return fmt.Errorf("WithRequestQueue option must be used with the new request exporters only, use WithQueue instead") + } + if !cfg.Enabled { + o.ExportFailureMessage += " Try enabling sending_queue to survive temporary failures." 
+ return nil
+ }
+ o.QueueCfg = cfg
+ o.QueueFactory = queueFactory
+ return nil
+ }
+}
+
+// WithCapabilities overrides the default Capabilities() function for a Consumer.
+// The default is non-mutable data.
+// TODO: Verify if we can change the default to be mutable as we do for processors.
+func WithCapabilities(capabilities consumer.Capabilities) Option {
+ return func(o *BaseExporter) error {
+ o.ConsumerOptions = append(o.ConsumerOptions, consumer.WithCapabilities(capabilities))
+ return nil
+ }
+}
+
+// WithRequestBatchFuncs sets the functions for merging and splitting batches for an exporter built for custom request types.
+func WithRequestBatchFuncs(mf exporterbatcher.BatchMergeFunc[internal.Request], msf exporterbatcher.BatchMergeSplitFunc[internal.Request]) BatcherOption {
+ return func(bs *BatchSender) error {
+ if mf == nil || msf == nil {
+ return fmt.Errorf("WithRequestBatchFuncs must be provided with non-nil functions")
+ }
+ if bs.mergeFunc != nil || bs.mergeSplitFunc != nil {
+ return fmt.Errorf("WithRequestBatchFuncs can only be used once with request-based exporters")
+ }
+ bs.mergeFunc = mf
+ bs.mergeSplitFunc = msf
+ return nil
+ }
+}
+
+// WithBatcher enables batching for an exporter based on custom request types.
+// For now, it can be used only with the New[Traces|Metrics|Logs]RequestExporter exporter helpers and
+// WithRequestBatchFuncs provided.
+// This API is at the early stage of development and may change without backward compatibility
+// until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved.
+func WithBatcher(cfg exporterbatcher.Config, opts ...BatcherOption) Option {
+ return func(o *BaseExporter) error {
+ o.BatcherCfg = cfg
+ o.BatcherOpts = opts
+ return nil
+ }
+}
+
+// WithMarshaler is used to set the request marshaler for the new exporter helper.
+// It must be provided as the first option when creating a new exporter helper.
+func WithMarshaler(marshaler exporterqueue.Marshaler[internal.Request]) Option {
+ return func(o *BaseExporter) error {
+ o.Marshaler = marshaler
+ return nil
+ }
+}
+
+// WithUnmarshaler is used to set the request unmarshaler for the new exporter helper.
+// It must be provided as the first option when creating a new exporter helper.
+func WithUnmarshaler(unmarshaler exporterqueue.Unmarshaler[internal.Request]) Option {
+ return func(o *BaseExporter) error {
+ o.Unmarshaler = unmarshaler
+ return nil
+ }
+}
+
+// WithBatchFuncs is used to set the functions for merging and splitting batches for OTLP-based exporters.
+// It must be provided as the first option when creating a new exporter helper.
+func WithBatchFuncs(mf exporterbatcher.BatchMergeFunc[internal.Request], msf exporterbatcher.BatchMergeSplitFunc[internal.Request]) Option { + return func(o *BaseExporter) error { + o.BatchMergeFunc = mf + o.BatchMergeSplitfunc = msf + return nil + } +} + +func CheckStatus(t *testing.T, sd sdktrace.ReadOnlySpan, err error) { + if err != nil { + require.Equal(t, codes.Error, sd.Status().Code, "SpanData %v", sd) + require.Equal(t, err.Error(), sd.Status().Description, "SpanData %v", sd) + } else { + require.Equal(t, codes.Unset, sd.Status().Code, "SpanData %v", sd) + } +} diff --git a/exporter/exporterhelper/common_test.go b/exporter/exporterhelper/internal/base_exporter_test.go similarity index 69% rename from exporter/exporterhelper/common_test.go rename to exporter/exporterhelper/internal/base_exporter_test.go index 512233ae1c7..f9c5975a171 100644 --- a/exporter/exporterhelper/common_test.go +++ b/exporter/exporterhelper/internal/base_exporter_test.go @@ -1,7 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package exporterhelper +package internal import ( "context" @@ -9,8 +9,6 @@ import ( "testing" "github.com/stretchr/testify/require" - "go.opentelemetry.io/otel/codes" - sdktrace "go.opentelemetry.io/otel/sdk/trace" "go.uber.org/zap" "go.uber.org/zap/zaptest/observer" @@ -20,6 +18,7 @@ import ( "go.opentelemetry.io/collector/exporter" "go.opentelemetry.io/collector/exporter/exporterqueue" "go.opentelemetry.io/collector/exporter/exportertest" + "go.opentelemetry.io/collector/exporter/internal" ) var ( @@ -33,12 +32,12 @@ var ( }() ) -func newNoopObsrepSender(*obsReport) requestSender { - return &baseRequestSender{} +func newNoopObsrepSender(*ObsReport) RequestSender { + return &BaseRequestSender{} } func TestBaseExporter(t *testing.T) { - be, err := newBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender) + be, err := NewBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender) require.NoError(t, err) require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) require.NoError(t, be.Shutdown(context.Background())) @@ -46,7 +45,7 @@ func TestBaseExporter(t *testing.T) { func TestBaseExporterWithOptions(t *testing.T) { want := errors.New("my error") - be, err := newBaseExporter( + be, err := NewBaseExporter( defaultSettings, defaultDataType, newNoopObsrepSender, WithStart(func(context.Context, component.Host) error { return want }), WithShutdown(func(context.Context) error { return want }), @@ -57,29 +56,20 @@ func TestBaseExporterWithOptions(t *testing.T) { require.Equal(t, want, be.Shutdown(context.Background())) } -func checkStatus(t *testing.T, sd sdktrace.ReadOnlySpan, err error) { - if err != nil { - require.Equal(t, codes.Error, sd.Status().Code, "SpanData %v", sd) - require.Equal(t, err.Error(), sd.Status().Description, "SpanData %v", sd) - } else { - require.Equal(t, codes.Unset, sd.Status().Code, "SpanData %v", sd) - } -} - func TestQueueOptionsWithRequestExporter(t *testing.T) { - bs, err := newBaseExporter(exportertest.NewNopSettings(), defaultDataType, newNoopObsrepSender, + bs, err := NewBaseExporter(exportertest.NewNopSettings(), defaultDataType, newNoopObsrepSender, WithRetry(configretry.NewDefaultBackOffConfig())) require.NoError(t, err) - require.Nil(t, bs.marshaler) - require.Nil(t, bs.unmarshaler) - _, err = newBaseExporter(exportertest.NewNopSettings(), defaultDataType, newNoopObsrepSender, + require.Nil(t, bs.Marshaler) + require.Nil(t, bs.Unmarshaler) + _, err = 
NewBaseExporter(exportertest.NewNopSettings(), defaultDataType, newNoopObsrepSender, WithRetry(configretry.NewDefaultBackOffConfig()), WithQueue(NewDefaultQueueConfig())) require.Error(t, err) - _, err = newBaseExporter(exportertest.NewNopSettings(), defaultDataType, newNoopObsrepSender, - withMarshaler(mockRequestMarshaler), withUnmarshaler(mockRequestUnmarshaler(&mockRequest{})), + _, err = NewBaseExporter(exportertest.NewNopSettings(), defaultDataType, newNoopObsrepSender, + WithMarshaler(mockRequestMarshaler), WithUnmarshaler(mockRequestUnmarshaler(&mockRequest{})), WithRetry(configretry.NewDefaultBackOffConfig()), - WithRequestQueue(exporterqueue.NewDefaultConfig(), exporterqueue.NewMemoryQueueFactory[Request]())) + WithRequestQueue(exporterqueue.NewDefaultConfig(), exporterqueue.NewMemoryQueueFactory[internal.Request]())) require.Error(t, err) } @@ -89,9 +79,9 @@ func TestBaseExporterLogging(t *testing.T) { set.Logger = zap.New(logger) rCfg := configretry.NewDefaultBackOffConfig() rCfg.Enabled = false - bs, err := newBaseExporter(set, defaultDataType, newNoopObsrepSender, WithRetry(rCfg)) + bs, err := NewBaseExporter(set, defaultDataType, newNoopObsrepSender, WithRetry(rCfg)) require.NoError(t, err) - sendErr := bs.send(context.Background(), newErrorRequest()) + sendErr := bs.Send(context.Background(), newErrorRequest()) require.Error(t, sendErr) require.Len(t, observed.FilterLevelExact(zap.ErrorLevel).All(), 1) diff --git a/exporter/exporterhelper/batch_sender.go b/exporter/exporterhelper/internal/batch_sender.go similarity index 81% rename from exporter/exporterhelper/batch_sender.go rename to exporter/exporterhelper/internal/batch_sender.go index 4d9635195e2..65d7e0965f7 100644 --- a/exporter/exporterhelper/batch_sender.go +++ b/exporter/exporterhelper/internal/batch_sender.go @@ -1,7 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package exporterhelper // import "go.opentelemetry.io/collector/exporter/exporterhelper" +package internal // import "go.opentelemetry.io/collector/exporter/exporterhelper/internal" import ( "context" @@ -14,18 +14,19 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/exporter" "go.opentelemetry.io/collector/exporter/exporterbatcher" + "go.opentelemetry.io/collector/exporter/internal" ) -// batchSender is a component that places requests into batches before passing them to the downstream senders. +// BatchSender is a component that places requests into batches before passing them to the downstream senders. // Batches are sent out with any of the following conditions: // - batch size reaches cfg.MinSizeItems // - cfg.FlushTimeout is elapsed since the timestamp when the previous batch was sent out. // - concurrencyLimit is reached. -type batchSender struct { - baseRequestSender +type BatchSender struct { + BaseRequestSender cfg exporterbatcher.Config - mergeFunc exporterbatcher.BatchMergeFunc[Request] - mergeSplitFunc exporterbatcher.BatchMergeSplitFunc[Request] + mergeFunc exporterbatcher.BatchMergeFunc[internal.Request] + mergeSplitFunc exporterbatcher.BatchMergeSplitFunc[internal.Request] // concurrencyLimit is the maximum number of goroutines that can be blocked by the batcher. // If this number is reached and all the goroutines are busy, the batch will be sent right away. @@ -45,9 +46,9 @@ type batchSender struct { } // newBatchSender returns a new batch consumer component. 
-func newBatchSender(cfg exporterbatcher.Config, set exporter.Settings, - mf exporterbatcher.BatchMergeFunc[Request], msf exporterbatcher.BatchMergeSplitFunc[Request]) *batchSender { - bs := &batchSender{ +func NewBatchSender(cfg exporterbatcher.Config, set exporter.Settings, + mf exporterbatcher.BatchMergeFunc[internal.Request], msf exporterbatcher.BatchMergeSplitFunc[internal.Request]) *BatchSender { + bs := &BatchSender{ activeBatch: newEmptyBatch(), cfg: cfg, logger: set.Logger, @@ -60,7 +61,7 @@ func newBatchSender(cfg exporterbatcher.Config, set exporter.Settings, return bs } -func (bs *batchSender) Start(_ context.Context, _ component.Host) error { +func (bs *BatchSender) Start(_ context.Context, _ component.Host) error { bs.shutdownCh = make(chan struct{}) timer := time.NewTimer(bs.cfg.FlushTimeout) go func() { @@ -103,7 +104,7 @@ func (bs *batchSender) Start(_ context.Context, _ component.Host) error { type batch struct { ctx context.Context - request Request + request internal.Request done chan struct{} err error @@ -121,9 +122,9 @@ func newEmptyBatch() *batch { // exportActiveBatch exports the active batch asynchronously and replaces it with a new one. // Caller must hold the lock. -func (bs *batchSender) exportActiveBatch() { +func (bs *BatchSender) exportActiveBatch() { go func(b *batch) { - b.err = bs.nextSender.send(b.ctx, b.request) + b.err = bs.NextSender.Send(b.ctx, b.request) close(b.done) bs.activeRequests.Add(-b.requestsBlocked) }(bs.activeBatch) @@ -134,15 +135,15 @@ func (bs *batchSender) exportActiveBatch() { // isActiveBatchReady returns true if the active batch is ready to be exported. // The batch is ready if it has reached the minimum size or the concurrency limit is reached. // Caller must hold the lock. -func (bs *batchSender) isActiveBatchReady() bool { +func (bs *BatchSender) isActiveBatchReady() bool { return bs.activeBatch.request.ItemsCount() >= bs.cfg.MinSizeItems || (bs.concurrencyLimit > 0 && bs.activeRequests.Load() >= bs.concurrencyLimit) } -func (bs *batchSender) send(ctx context.Context, req Request) error { +func (bs *BatchSender) Send(ctx context.Context, req internal.Request) error { // Stopped batch sender should act as pass-through to allow the queue to be drained. if bs.stopped.Load() { - return bs.nextSender.send(ctx, req) + return bs.NextSender.Send(ctx, req) } if bs.cfg.MaxSizeItems > 0 { @@ -152,7 +153,7 @@ func (bs *batchSender) send(ctx context.Context, req Request) error { } // sendMergeSplitBatch sends the request to the batch which may be split into multiple requests. -func (bs *batchSender) sendMergeSplitBatch(ctx context.Context, req Request) error { +func (bs *BatchSender) sendMergeSplitBatch(ctx context.Context, req internal.Request) error { bs.mu.Lock() reqs, err := bs.mergeSplitFunc(ctx, bs.cfg.MaxSizeConfig, bs.activeBatch.request, req) @@ -187,7 +188,7 @@ func (bs *batchSender) sendMergeSplitBatch(ctx context.Context, req Request) err // Intentionally do not put the last request in the active batch to not block it. // TODO: Consider including the partial request in the error to avoid double publishing. for _, r := range reqs { - if err := bs.nextSender.send(ctx, r); err != nil { + if err := bs.NextSender.Send(ctx, r); err != nil { return err } } @@ -195,7 +196,7 @@ func (bs *batchSender) sendMergeSplitBatch(ctx context.Context, req Request) err } // sendMergeBatch sends the request to the batch and waits for the batch to be exported. 
-func (bs *batchSender) sendMergeBatch(ctx context.Context, req Request) error { +func (bs *BatchSender) sendMergeBatch(ctx context.Context, req internal.Request) error { bs.mu.Lock() if bs.activeBatch.request != nil { @@ -223,14 +224,14 @@ func (bs *batchSender) sendMergeBatch(ctx context.Context, req Request) error { // The context is only set once and is not updated after the first call. // Merging the context would be complex and require an additional goroutine to handle the context cancellation. // We take the approach of using the context from the first request since it's likely to have the shortest timeout. -func (bs *batchSender) updateActiveBatch(ctx context.Context, req Request) { +func (bs *BatchSender) updateActiveBatch(ctx context.Context, req internal.Request) { if bs.activeBatch.request == nil { bs.activeBatch.ctx = ctx } bs.activeBatch.request = req } -func (bs *batchSender) Shutdown(context.Context) error { +func (bs *BatchSender) Shutdown(context.Context) error { bs.stopped.Store(true) if bs.shutdownCh != nil { close(bs.shutdownCh) diff --git a/exporter/exporterhelper/batch_sender_test.go b/exporter/exporterhelper/internal/batch_sender_test.go similarity index 83% rename from exporter/exporterhelper/batch_sender_test.go rename to exporter/exporterhelper/internal/batch_sender_test.go index 5f5d49bec9a..04270e05342 100644 --- a/exporter/exporterhelper/batch_sender_test.go +++ b/exporter/exporterhelper/internal/batch_sender_test.go @@ -1,7 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package exporterhelper // import "go.opentelemetry.io/collector/exporter/exporterhelper" +package internal import ( "context" @@ -17,6 +17,7 @@ import ( "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/exporter/exporterbatcher" "go.opentelemetry.io/collector/exporter/exporterqueue" + "go.opentelemetry.io/collector/exporter/internal" ) func TestBatchSender_Merge(t *testing.T) { @@ -55,23 +56,23 @@ func TestBatchSender_Merge(t *testing.T) { sink := newFakeRequestSink() - require.NoError(t, be.send(context.Background(), &fakeRequest{items: 8, sink: sink})) - require.NoError(t, be.send(context.Background(), &fakeRequest{items: 3, sink: sink})) + require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 8, sink: sink})) + require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 3, sink: sink})) // the first two requests should be merged into one and sent by reaching the minimum items size assert.Eventually(t, func() bool { return sink.requestsCount.Load() == 1 && sink.itemsCount.Load() == 11 }, 50*time.Millisecond, 10*time.Millisecond) - require.NoError(t, be.send(context.Background(), &fakeRequest{items: 3, sink: sink})) - require.NoError(t, be.send(context.Background(), &fakeRequest{items: 1, sink: sink})) + require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 3, sink: sink})) + require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 1, sink: sink})) // the third and fifth requests should be sent by reaching the timeout // the fourth request should be ignored because of the merge error. time.Sleep(50 * time.Millisecond) // should be ignored because of the merge error. 
- require.NoError(t, be.send(context.Background(), &fakeRequest{items: 3, sink: sink, + require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 3, sink: sink, mergeErr: errors.New("merge error")})) assert.Equal(t, uint64(1), sink.requestsCount.Load()) @@ -125,8 +126,8 @@ func TestBatchSender_BatchExportError(t *testing.T) { sink := newFakeRequestSink() - require.NoError(t, be.send(context.Background(), &fakeRequest{items: 4, sink: sink})) - require.NoError(t, be.send(context.Background(), &fakeRequest{items: 4, sink: sink})) + require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 4, sink: sink})) + require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 4, sink: sink})) // the first two requests should be blocked by the batchSender. time.Sleep(50 * time.Millisecond) @@ -134,14 +135,14 @@ func TestBatchSender_BatchExportError(t *testing.T) { // the third request should trigger the export and cause an error. errReq := &fakeRequest{items: 20, exportErr: errors.New("transient error"), sink: sink} - require.NoError(t, be.send(context.Background(), errReq)) + require.NoError(t, be.Send(context.Background(), errReq)) // the batch should be dropped since the queue doesn't have requeuing enabled. assert.Eventually(t, func() bool { return sink.requestsCount.Load() == tt.expectedRequests && sink.itemsCount.Load() == tt.expectedItems && - be.batchSender.(*batchSender).activeRequests.Load() == 0 && - be.queueSender.(*queueSender).queue.Size() == 0 + be.BatchSender.(*BatchSender).activeRequests.Load() == 0 && + be.QueueSender.(*QueueSender).queue.Size() == 0 }, 100*time.Millisecond, 10*time.Millisecond) }) } @@ -162,24 +163,24 @@ func TestBatchSender_MergeOrSplit(t *testing.T) { sink := newFakeRequestSink() // should be sent right away by reaching the minimum items size. - require.NoError(t, be.send(context.Background(), &fakeRequest{items: 8, sink: sink})) + require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 8, sink: sink})) assert.Eventually(t, func() bool { return sink.requestsCount.Load() == 1 && sink.itemsCount.Load() == 8 }, 50*time.Millisecond, 10*time.Millisecond) // big request should be broken down into two requests, both are sent right away. - require.NoError(t, be.send(context.Background(), &fakeRequest{items: 17, sink: sink})) + require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 17, sink: sink})) assert.Eventually(t, func() bool { return sink.requestsCount.Load() == 3 && sink.itemsCount.Load() == 25 }, 50*time.Millisecond, 10*time.Millisecond) // request that cannot be split should be dropped. - require.NoError(t, be.send(context.Background(), &fakeRequest{items: 11, sink: sink, + require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 11, sink: sink, mergeErr: errors.New("split error")})) // big request should be broken down into two requests, both are sent right away. 
- require.NoError(t, be.send(context.Background(), &fakeRequest{items: 13, sink: sink})) + require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 13, sink: sink})) assert.Eventually(t, func() bool { return sink.requestsCount.Load() == 5 && sink.itemsCount.Load() == 38 @@ -194,7 +195,7 @@ func TestBatchSender_Shutdown(t *testing.T) { require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) sink := newFakeRequestSink() - require.NoError(t, be.send(context.Background(), &fakeRequest{items: 3, sink: sink})) + require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 3, sink: sink})) // To make the request reached the batchSender before shutdown. time.Sleep(50 * time.Millisecond) @@ -210,7 +211,7 @@ func TestBatchSender_Disabled(t *testing.T) { cfg := exporterbatcher.NewDefaultConfig() cfg.Enabled = false cfg.MaxSizeItems = 5 - be, err := newBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender, + be, err := NewBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender, WithBatcher(cfg, WithRequestBatchFuncs(fakeBatchMergeFunc, fakeBatchMergeSplitFunc))) require.NotNil(t, be) require.NoError(t, err) @@ -222,20 +223,20 @@ func TestBatchSender_Disabled(t *testing.T) { sink := newFakeRequestSink() // should be sent right away without splitting because batching is disabled. - require.NoError(t, be.send(context.Background(), &fakeRequest{items: 8, sink: sink})) + require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 8, sink: sink})) assert.Equal(t, uint64(1), sink.requestsCount.Load()) assert.Equal(t, uint64(8), sink.itemsCount.Load()) } func TestBatchSender_InvalidMergeSplitFunc(t *testing.T) { - invalidMergeSplitFunc := func(_ context.Context, _ exporterbatcher.MaxSizeConfig, _ Request, req2 Request) ([]Request, + invalidMergeSplitFunc := func(_ context.Context, _ exporterbatcher.MaxSizeConfig, _ internal.Request, req2 internal.Request) ([]internal.Request, error) { // reply with invalid 0 length slice if req2 is more than 20 items if req2.(*fakeRequest).items > 20 { - return []Request{}, nil + return []internal.Request{}, nil } // otherwise reply with a single request. - return []Request{req2}, nil + return []internal.Request{req2}, nil } cfg := exporterbatcher.NewDefaultConfig() cfg.FlushTimeout = 50 * time.Millisecond @@ -249,16 +250,16 @@ func TestBatchSender_InvalidMergeSplitFunc(t *testing.T) { sink := newFakeRequestSink() // first request should be ignored due to invalid merge/split function. - require.NoError(t, be.send(context.Background(), &fakeRequest{items: 30, sink: sink})) + require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 30, sink: sink})) // second request should be sent after reaching the timeout. 
- require.NoError(t, be.send(context.Background(), &fakeRequest{items: 15, sink: sink})) + require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 15, sink: sink})) assert.Eventually(t, func() bool { return sink.requestsCount.Load() == 1 && sink.itemsCount.Load() == 15 }, 100*time.Millisecond, 10*time.Millisecond) } func TestBatchSender_PostShutdown(t *testing.T) { - be, err := newBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender, + be, err := NewBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender, WithBatcher(exporterbatcher.NewDefaultConfig(), WithRequestBatchFuncs(fakeBatchMergeFunc, fakeBatchMergeSplitFunc))) require.NotNil(t, be) @@ -268,7 +269,7 @@ func TestBatchSender_PostShutdown(t *testing.T) { // Closed batch sender should act as a pass-through to not block queue draining. sink := newFakeRequestSink() - require.NoError(t, be.send(context.Background(), &fakeRequest{items: 8, sink: sink})) + require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 8, sink: sink})) assert.Equal(t, uint64(1), sink.requestsCount.Load()) assert.Equal(t, uint64(8), sink.itemsCount.Load()) } @@ -321,9 +322,9 @@ func TestBatchSender_ConcurrencyLimitReached(t *testing.T) { t.Run(tt.name, func(t *testing.T) { qCfg := exporterqueue.NewDefaultConfig() qCfg.NumConsumers = 2 - be, err := newBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender, + be, err := NewBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender, WithBatcher(tt.batcherCfg, WithRequestBatchFuncs(fakeBatchMergeFunc, fakeBatchMergeSplitFunc)), - WithRequestQueue(qCfg, exporterqueue.NewMemoryQueueFactory[Request]())) + WithRequestQueue(qCfg, exporterqueue.NewMemoryQueueFactory[internal.Request]())) require.NotNil(t, be) require.NoError(t, err) assert.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) @@ -333,29 +334,29 @@ func TestBatchSender_ConcurrencyLimitReached(t *testing.T) { sink := newFakeRequestSink() // the 1st and 2nd request should be flushed in the same batched request by max concurrency limit. - assert.NoError(t, be.send(context.Background(), &fakeRequest{items: 2, sink: sink})) - assert.NoError(t, be.send(context.Background(), &fakeRequest{items: 2, sink: sink})) + assert.NoError(t, be.Send(context.Background(), &fakeRequest{items: 2, sink: sink})) + assert.NoError(t, be.Send(context.Background(), &fakeRequest{items: 2, sink: sink})) assert.Eventually(t, func() bool { return sink.requestsCount.Load() == 1 && sink.itemsCount.Load() == 4 }, 100*time.Millisecond, 10*time.Millisecond) // the 3rd request should be flushed by itself due to flush interval - assert.NoError(t, be.send(context.Background(), &fakeRequest{items: 2, sink: sink})) + assert.NoError(t, be.Send(context.Background(), &fakeRequest{items: 2, sink: sink})) assert.Eventually(t, func() bool { return sink.requestsCount.Load() == 2 && sink.itemsCount.Load() == 6 }, 100*time.Millisecond, 10*time.Millisecond) // the 4th and 5th request should be flushed in the same batched request by max concurrency limit. 
- assert.NoError(t, be.send(context.Background(), &fakeRequest{items: 2, sink: sink})) - assert.NoError(t, be.send(context.Background(), &fakeRequest{items: 2, sink: sink})) + assert.NoError(t, be.Send(context.Background(), &fakeRequest{items: 2, sink: sink})) + assert.NoError(t, be.Send(context.Background(), &fakeRequest{items: 2, sink: sink})) assert.Eventually(t, func() bool { return sink.requestsCount.Load() == 3 && sink.itemsCount.Load() == 10 }, 100*time.Millisecond, 10*time.Millisecond) // do it a few more times to ensure it produces the correct batch size regardless of goroutine scheduling. - assert.NoError(t, be.send(context.Background(), &fakeRequest{items: 5, sink: sink})) - assert.NoError(t, be.send(context.Background(), &fakeRequest{items: 6, sink: sink})) + assert.NoError(t, be.Send(context.Background(), &fakeRequest{items: 5, sink: sink})) + assert.NoError(t, be.Send(context.Background(), &fakeRequest{items: 6, sink: sink})) if tt.batcherCfg.MaxSizeItems == 10 { // in case of MaxSizeItems=10, wait for the leftover request to send assert.Eventually(t, func() bool { @@ -363,9 +364,9 @@ func TestBatchSender_ConcurrencyLimitReached(t *testing.T) { }, 50*time.Millisecond, 10*time.Millisecond) } - assert.NoError(t, be.send(context.Background(), &fakeRequest{items: 4, sink: sink})) - assert.NoError(t, be.send(context.Background(), &fakeRequest{items: 6, sink: sink})) - assert.NoError(t, be.send(context.Background(), &fakeRequest{items: 20, sink: sink})) + assert.NoError(t, be.Send(context.Background(), &fakeRequest{items: 4, sink: sink})) + assert.NoError(t, be.Send(context.Background(), &fakeRequest{items: 6, sink: sink})) + assert.NoError(t, be.Send(context.Background(), &fakeRequest{items: 20, sink: sink})) assert.Eventually(t, func() bool { return sink.requestsCount.Load() == tt.expectedRequests && sink.itemsCount.Load() == tt.expectedItems }, 100*time.Millisecond, 10*time.Millisecond) @@ -376,7 +377,7 @@ func TestBatchSender_ConcurrencyLimitReached(t *testing.T) { func TestBatchSender_BatchBlocking(t *testing.T) { bCfg := exporterbatcher.NewDefaultConfig() bCfg.MinSizeItems = 3 - be, err := newBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender, + be, err := NewBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender, WithBatcher(bCfg, WithRequestBatchFuncs(fakeBatchMergeFunc, fakeBatchMergeSplitFunc))) require.NotNil(t, be) require.NoError(t, err) @@ -389,7 +390,7 @@ func TestBatchSender_BatchBlocking(t *testing.T) { for i := 0; i < 6; i++ { wg.Add(1) go func() { - assert.NoError(t, be.send(context.Background(), &fakeRequest{items: 1, sink: sink, delay: 10 * time.Millisecond})) + assert.NoError(t, be.Send(context.Background(), &fakeRequest{items: 1, sink: sink, delay: 10 * time.Millisecond})) wg.Done() }() } @@ -406,7 +407,7 @@ func TestBatchSender_BatchBlocking(t *testing.T) { func TestBatchSender_BatchCancelled(t *testing.T) { bCfg := exporterbatcher.NewDefaultConfig() bCfg.MinSizeItems = 2 - be, err := newBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender, + be, err := NewBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender, WithBatcher(bCfg, WithRequestBatchFuncs(fakeBatchMergeFunc, fakeBatchMergeSplitFunc))) require.NotNil(t, be) require.NoError(t, err) @@ -419,13 +420,13 @@ func TestBatchSender_BatchCancelled(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) wg.Add(1) go func() { - assert.ErrorIs(t, be.send(ctx, &fakeRequest{items: 1, sink: sink, delay: 100 * time.Millisecond}), 
context.Canceled) + assert.ErrorIs(t, be.Send(ctx, &fakeRequest{items: 1, sink: sink, delay: 100 * time.Millisecond}), context.Canceled) wg.Done() }() wg.Add(1) go func() { time.Sleep(20 * time.Millisecond) // ensure this call is the second - assert.ErrorIs(t, be.send(context.Background(), &fakeRequest{items: 1, sink: sink, delay: 100 * time.Millisecond}), context.Canceled) + assert.ErrorIs(t, be.Send(context.Background(), &fakeRequest{items: 1, sink: sink, delay: 100 * time.Millisecond}), context.Canceled) wg.Done() }() cancel() // canceling the first request should cancel the whole batch @@ -441,7 +442,7 @@ func TestBatchSender_BatchCancelled(t *testing.T) { func TestBatchSender_DrainActiveRequests(t *testing.T) { bCfg := exporterbatcher.NewDefaultConfig() bCfg.MinSizeItems = 2 - be, err := newBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender, + be, err := NewBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender, WithBatcher(bCfg, WithRequestBatchFuncs(fakeBatchMergeFunc, fakeBatchMergeSplitFunc))) require.NotNil(t, be) require.NoError(t, err) @@ -451,13 +452,13 @@ func TestBatchSender_DrainActiveRequests(t *testing.T) { // send 3 blocking requests with a timeout go func() { - assert.NoError(t, be.send(context.Background(), &fakeRequest{items: 1, sink: sink, delay: 40 * time.Millisecond})) + assert.NoError(t, be.Send(context.Background(), &fakeRequest{items: 1, sink: sink, delay: 40 * time.Millisecond})) }() go func() { - assert.NoError(t, be.send(context.Background(), &fakeRequest{items: 1, sink: sink, delay: 40 * time.Millisecond})) + assert.NoError(t, be.Send(context.Background(), &fakeRequest{items: 1, sink: sink, delay: 40 * time.Millisecond})) }() go func() { - assert.NoError(t, be.send(context.Background(), &fakeRequest{items: 1, sink: sink, delay: 40 * time.Millisecond})) + assert.NoError(t, be.Send(context.Background(), &fakeRequest{items: 1, sink: sink, delay: 40 * time.Millisecond})) }() // give time for the first two requests to be batched @@ -484,13 +485,13 @@ func TestBatchSender_WithBatcherOption(t *testing.T) { }, { name: "funcs_set_internally", - opts: []Option{withBatchFuncs(fakeBatchMergeFunc, fakeBatchMergeSplitFunc), WithBatcher(exporterbatcher.NewDefaultConfig())}, + opts: []Option{WithBatchFuncs(fakeBatchMergeFunc, fakeBatchMergeSplitFunc), WithBatcher(exporterbatcher.NewDefaultConfig())}, expectedErr: false, }, { name: "funcs_set_twice", opts: []Option{ - withBatchFuncs(fakeBatchMergeFunc, fakeBatchMergeSplitFunc), + WithBatchFuncs(fakeBatchMergeFunc, fakeBatchMergeSplitFunc), WithBatcher(exporterbatcher.NewDefaultConfig(), WithRequestBatchFuncs(fakeBatchMergeFunc, fakeBatchMergeSplitFunc)), }, @@ -504,7 +505,7 @@ func TestBatchSender_WithBatcherOption(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - be, err := newBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender, tt.opts...) + be, err := NewBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender, tt.opts...) 
if tt.expectedErr { assert.Nil(t, be) assert.Error(t, err) @@ -517,7 +518,7 @@ func TestBatchSender_WithBatcherOption(t *testing.T) { } func TestBatchSender_UnstartedShutdown(t *testing.T) { - be, err := newBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender, + be, err := NewBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender, WithBatcher(exporterbatcher.NewDefaultConfig(), WithRequestBatchFuncs(fakeBatchMergeFunc, fakeBatchMergeSplitFunc))) require.NoError(t, err) @@ -532,7 +533,7 @@ func TestBatchSender_ShutdownDeadlock(t *testing.T) { waitMerge := make(chan struct{}, 10) // blockedBatchMergeFunc blocks until the blockMerge channel is closed - blockedBatchMergeFunc := func(_ context.Context, r1 Request, r2 Request) (Request, error) { + blockedBatchMergeFunc := func(_ context.Context, r1 internal.Request, r2 internal.Request) (internal.Request, error) { waitMerge <- struct{}{} <-blockMerge r1.(*fakeRequest).items += r2.(*fakeRequest).items @@ -541,7 +542,7 @@ func TestBatchSender_ShutdownDeadlock(t *testing.T) { bCfg := exporterbatcher.NewDefaultConfig() bCfg.FlushTimeout = 10 * time.Minute // high timeout to avoid the timeout to trigger - be, err := newBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender, + be, err := NewBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender, WithBatcher(bCfg, WithRequestBatchFuncs(blockedBatchMergeFunc, fakeBatchMergeSplitFunc))) require.NoError(t, err) require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) @@ -549,8 +550,8 @@ func TestBatchSender_ShutdownDeadlock(t *testing.T) { sink := newFakeRequestSink() // Send 2 concurrent requests - go func() { require.NoError(t, be.send(context.Background(), &fakeRequest{items: 4, sink: sink})) }() - go func() { require.NoError(t, be.send(context.Background(), &fakeRequest{items: 4, sink: sink})) }() + go func() { require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 4, sink: sink})) }() + go func() { require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 4, sink: sink})) }() // Wait for the requests to enter the merge function <-waitMerge @@ -577,7 +578,7 @@ func TestBatchSenderWithTimeout(t *testing.T) { bCfg.MinSizeItems = 10 tCfg := NewDefaultTimeoutConfig() tCfg.Timeout = 50 * time.Millisecond - be, err := newBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender, + be, err := NewBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender, WithBatcher(bCfg, WithRequestBatchFuncs(fakeBatchMergeFunc, fakeBatchMergeSplitFunc)), WithTimeout(tCfg)) require.NoError(t, err) @@ -590,7 +591,7 @@ func TestBatchSenderWithTimeout(t *testing.T) { for i := 0; i < 3; i++ { wg.Add(1) go func() { - require.NoError(t, be.send(context.Background(), &fakeRequest{items: 4, sink: sink})) + require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 4, sink: sink})) wg.Done() }() } @@ -602,7 +603,7 @@ func TestBatchSenderWithTimeout(t *testing.T) { for i := 0; i < 3; i++ { wg.Add(1) go func() { - assert.Error(t, be.send(context.Background(), &fakeRequest{items: 4, sink: sink, delay: 30 * time.Millisecond})) + assert.Error(t, be.Send(context.Background(), &fakeRequest{items: 4, sink: sink, delay: 30 * time.Millisecond})) wg.Done() }() } @@ -616,7 +617,7 @@ func TestBatchSenderWithTimeout(t *testing.T) { } func TestBatchSenderTimerResetNoConflict(t *testing.T) { - delayBatchMergeFunc := func(_ context.Context, r1 Request, r2 Request) (Request, error) { + delayBatchMergeFunc := func(_ 
context.Context, r1 internal.Request, r2 internal.Request) (internal.Request, error) { time.Sleep(30 * time.Millisecond) if r1 == nil { return r2, nil @@ -636,7 +637,7 @@ func TestBatchSenderTimerResetNoConflict(t *testing.T) { bCfg := exporterbatcher.NewDefaultConfig() bCfg.MinSizeItems = 8 bCfg.FlushTimeout = 50 * time.Millisecond - be, err := newBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender, + be, err := NewBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender, WithBatcher(bCfg, WithRequestBatchFuncs(delayBatchMergeFunc, fakeBatchMergeSplitFunc))) require.NoError(t, err) require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) @@ -644,11 +645,11 @@ func TestBatchSenderTimerResetNoConflict(t *testing.T) { // Send 2 concurrent requests that should be merged in one batch in the same interval as the flush timer go func() { - require.NoError(t, be.send(context.Background(), &fakeRequest{items: 4, sink: sink})) + require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 4, sink: sink})) }() time.Sleep(30 * time.Millisecond) go func() { - require.NoError(t, be.send(context.Background(), &fakeRequest{items: 4, sink: sink})) + require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 4, sink: sink})) }() // The batch should be sent either with the flush interval or by reaching the minimum items size with no conflict @@ -667,7 +668,7 @@ func TestBatchSenderTimerFlush(t *testing.T) { bCfg := exporterbatcher.NewDefaultConfig() bCfg.MinSizeItems = 8 bCfg.FlushTimeout = 100 * time.Millisecond - be, err := newBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender, + be, err := NewBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender, WithBatcher(bCfg, WithRequestBatchFuncs(fakeBatchMergeFunc, fakeBatchMergeSplitFunc))) require.NoError(t, err) require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) @@ -676,10 +677,10 @@ func TestBatchSenderTimerFlush(t *testing.T) { // Send 2 concurrent requests that should be merged in one batch and sent immediately go func() { - require.NoError(t, be.send(context.Background(), &fakeRequest{items: 4, sink: sink})) + require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 4, sink: sink})) }() go func() { - require.NoError(t, be.send(context.Background(), &fakeRequest{items: 4, sink: sink})) + require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 4, sink: sink})) }() assert.EventuallyWithT(t, func(c *assert.CollectT) { assert.LessOrEqual(c, uint64(1), sink.requestsCount.Load()) @@ -688,7 +689,7 @@ func TestBatchSenderTimerFlush(t *testing.T) { // Send another request that should be flushed after 100ms instead of 50ms since last flush go func() { - require.NoError(t, be.send(context.Background(), &fakeRequest{items: 4, sink: sink})) + require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 4, sink: sink})) }() // Confirm that it is not flushed in 50ms @@ -703,9 +704,9 @@ func TestBatchSenderTimerFlush(t *testing.T) { require.NoError(t, be.Shutdown(context.Background())) } -func queueBatchExporter(t *testing.T, batchOption Option) *baseExporter { - be, err := newBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender, batchOption, - WithRequestQueue(exporterqueue.NewDefaultConfig(), exporterqueue.NewMemoryQueueFactory[Request]())) +func queueBatchExporter(t *testing.T, batchOption Option) *BaseExporter { + be, err := NewBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender, 
batchOption, + WithRequestQueue(exporterqueue.NewDefaultConfig(), exporterqueue.NewMemoryQueueFactory[internal.Request]())) require.NotNil(t, be) require.NoError(t, err) return be diff --git a/exporter/exporterhelper/obsexporter.go b/exporter/exporterhelper/internal/obsexporter.go similarity index 50% rename from exporter/exporterhelper/obsexporter.go rename to exporter/exporterhelper/internal/obsexporter.go index 61be5cb5ba6..0482ecd4257 100644 --- a/exporter/exporterhelper/obsexporter.go +++ b/exporter/exporterhelper/internal/obsexporter.go @@ -1,7 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package exporterhelper // import "go.opentelemetry.io/collector/exporter/exporterhelper" +package internal // import "go.opentelemetry.io/collector/exporter/exporterhelper/internal" import ( "context" @@ -13,122 +13,121 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/exporter" - "go.opentelemetry.io/collector/exporter/exporterhelper/internal" "go.opentelemetry.io/collector/exporter/exporterhelper/internal/metadata" ) -// obsReport is a helper to add observability to an exporter. -type obsReport struct { +// ObsReport is a helper to add observability to an exporter. +type ObsReport struct { spanNamePrefix string tracer trace.Tracer - dataType component.DataType + DataType component.DataType otelAttrs []attribute.KeyValue - telemetryBuilder *metadata.TelemetryBuilder + TelemetryBuilder *metadata.TelemetryBuilder } -// obsReportSettings are settings for creating an obsReport. -type obsReportSettings struct { - exporterID component.ID - exporterCreateSettings exporter.Settings - dataType component.DataType +// ObsReportSettings are settings for creating an ObsReport. +type ObsReportSettings struct { + ExporterID component.ID + ExporterCreateSettings exporter.Settings + DataType component.DataType } -func newExporter(cfg obsReportSettings) (*obsReport, error) { - telemetryBuilder, err := metadata.NewTelemetryBuilder(cfg.exporterCreateSettings.TelemetrySettings) +func NewExporter(cfg ObsReportSettings) (*ObsReport, error) { + telemetryBuilder, err := metadata.NewTelemetryBuilder(cfg.ExporterCreateSettings.TelemetrySettings) if err != nil { return nil, err } - return &obsReport{ - spanNamePrefix: internal.ExporterPrefix + cfg.exporterID.String(), - tracer: cfg.exporterCreateSettings.TracerProvider.Tracer(cfg.exporterID.String()), - dataType: cfg.dataType, + return &ObsReport{ + spanNamePrefix: ExporterPrefix + cfg.ExporterID.String(), + tracer: cfg.ExporterCreateSettings.TracerProvider.Tracer(cfg.ExporterID.String()), + DataType: cfg.DataType, otelAttrs: []attribute.KeyValue{ - attribute.String(internal.ExporterKey, cfg.exporterID.String()), + attribute.String(ExporterKey, cfg.ExporterID.String()), }, - telemetryBuilder: telemetryBuilder, + TelemetryBuilder: telemetryBuilder, }, nil } -// startTracesOp is called at the start of an Export operation. +// StartTracesOp is called at the start of an Export operation. // The returned context should be used in other calls to the Exporter functions // dealing with the same export operation. -func (or *obsReport) startTracesOp(ctx context.Context) context.Context { - return or.startOp(ctx, internal.ExportTraceDataOperationSuffix) +func (or *ObsReport) StartTracesOp(ctx context.Context) context.Context { + return or.startOp(ctx, ExportTraceDataOperationSuffix) } -// endTracesOp completes the export operation that was started with startTracesOp. 
-func (or *obsReport) endTracesOp(ctx context.Context, numSpans int, err error) { +// EndTracesOp completes the export operation that was started with startTracesOp. +func (or *ObsReport) EndTracesOp(ctx context.Context, numSpans int, err error) { numSent, numFailedToSend := toNumItems(numSpans, err) or.recordMetrics(context.WithoutCancel(ctx), component.DataTypeTraces, numSent, numFailedToSend) - endSpan(ctx, err, numSent, numFailedToSend, internal.SentSpansKey, internal.FailedToSendSpansKey) + endSpan(ctx, err, numSent, numFailedToSend, SentSpansKey, FailedToSendSpansKey) } -// startMetricsOp is called at the start of an Export operation. +// StartMetricsOp is called at the start of an Export operation. // The returned context should be used in other calls to the Exporter functions // dealing with the same export operation. -func (or *obsReport) startMetricsOp(ctx context.Context) context.Context { - return or.startOp(ctx, internal.ExportMetricsOperationSuffix) +func (or *ObsReport) StartMetricsOp(ctx context.Context) context.Context { + return or.startOp(ctx, ExportMetricsOperationSuffix) } -// endMetricsOp completes the export operation that was started with +// EndMetricsOp completes the export operation that was started with // startMetricsOp. // // If needed, report your use case in https://github.com/open-telemetry/opentelemetry-collector/issues/10592. -func (or *obsReport) endMetricsOp(ctx context.Context, numMetricPoints int, err error) { +func (or *ObsReport) EndMetricsOp(ctx context.Context, numMetricPoints int, err error) { numSent, numFailedToSend := toNumItems(numMetricPoints, err) or.recordMetrics(context.WithoutCancel(ctx), component.DataTypeMetrics, numSent, numFailedToSend) - endSpan(ctx, err, numSent, numFailedToSend, internal.SentMetricPointsKey, internal.FailedToSendMetricPointsKey) + endSpan(ctx, err, numSent, numFailedToSend, SentMetricPointsKey, FailedToSendMetricPointsKey) } -// startLogsOp is called at the start of an Export operation. +// StartLogsOp is called at the start of an Export operation. // The returned context should be used in other calls to the Exporter functions // dealing with the same export operation. -func (or *obsReport) startLogsOp(ctx context.Context) context.Context { - return or.startOp(ctx, internal.ExportLogsOperationSuffix) +func (or *ObsReport) StartLogsOp(ctx context.Context) context.Context { + return or.startOp(ctx, ExportLogsOperationSuffix) } -// endLogsOp completes the export operation that was started with startLogsOp. -func (or *obsReport) endLogsOp(ctx context.Context, numLogRecords int, err error) { +// EndLogsOp completes the export operation that was started with startLogsOp. +func (or *ObsReport) EndLogsOp(ctx context.Context, numLogRecords int, err error) { numSent, numFailedToSend := toNumItems(numLogRecords, err) or.recordMetrics(context.WithoutCancel(ctx), component.DataTypeLogs, numSent, numFailedToSend) - endSpan(ctx, err, numSent, numFailedToSend, internal.SentLogRecordsKey, internal.FailedToSendLogRecordsKey) + endSpan(ctx, err, numSent, numFailedToSend, SentLogRecordsKey, FailedToSendLogRecordsKey) } -// startProfilesOp is called at the start of an Export operation. +// StartProfilesOp is called at the start of an Export operation. // The returned context should be used in other calls to the Exporter functions // dealing with the same export operation. 
-func (or *obsReport) startProfilesOp(ctx context.Context) context.Context { - return or.startOp(ctx, obsmetrics.ExportTraceDataOperationSuffix) +func (or *ObsReport) StartProfilesOp(ctx context.Context) context.Context { + return or.startOp(ctx, ExportTraceDataOperationSuffix) } -// endProfilesOp completes the export operation that was started with startProfilesOp. -func (or *obsReport) endProfilesOp(ctx context.Context, numSpans int, err error) { +// EndProfilesOp completes the export operation that was started with startProfilesOp. +func (or *ObsReport) EndProfilesOp(ctx context.Context, numSpans int, err error) { numSent, numFailedToSend := toNumItems(numSpans, err) - endSpan(ctx, err, numSent, numFailedToSend, obsmetrics.SentSamplesKey, obsmetrics.FailedToSendSamplesKey) + endSpan(ctx, err, numSent, numFailedToSend, SentSamplesKey, FailedToSendSamplesKey) } // startOp creates the span used to trace the operation. Returning // the updated context and the created span. -func (or *obsReport) startOp(ctx context.Context, operationSuffix string) context.Context { +func (or *ObsReport) startOp(ctx context.Context, operationSuffix string) context.Context { spanName := or.spanNamePrefix + operationSuffix ctx, _ = or.tracer.Start(ctx, spanName) return ctx } -func (or *obsReport) recordMetrics(ctx context.Context, dataType component.DataType, sent, failed int64) { +func (or *ObsReport) recordMetrics(ctx context.Context, dataType component.DataType, sent, failed int64) { var sentMeasure, failedMeasure metric.Int64Counter switch dataType { case component.DataTypeTraces: - sentMeasure = or.telemetryBuilder.ExporterSentSpans - failedMeasure = or.telemetryBuilder.ExporterSendFailedSpans + sentMeasure = or.TelemetryBuilder.ExporterSentSpans + failedMeasure = or.TelemetryBuilder.ExporterSendFailedSpans case component.DataTypeMetrics: - sentMeasure = or.telemetryBuilder.ExporterSentMetricPoints - failedMeasure = or.telemetryBuilder.ExporterSendFailedMetricPoints + sentMeasure = or.TelemetryBuilder.ExporterSentMetricPoints + failedMeasure = or.TelemetryBuilder.ExporterSendFailedMetricPoints case component.DataTypeLogs: - sentMeasure = or.telemetryBuilder.ExporterSentLogRecords - failedMeasure = or.telemetryBuilder.ExporterSendFailedLogRecords + sentMeasure = or.TelemetryBuilder.ExporterSentLogRecords + failedMeasure = or.TelemetryBuilder.ExporterSendFailedLogRecords } sentMeasure.Add(ctx, sent, metric.WithAttributes(or.otelAttrs...)) @@ -157,15 +156,15 @@ func toNumItems(numExportedItems int, err error) (int64, int64) { return int64(numExportedItems), 0 } -func (or *obsReport) recordEnqueueFailure(ctx context.Context, dataType component.DataType, failed int64) { +func (or *ObsReport) RecordEnqueueFailure(ctx context.Context, dataType component.DataType, failed int64) { var enqueueFailedMeasure metric.Int64Counter switch dataType { case component.DataTypeTraces: - enqueueFailedMeasure = or.telemetryBuilder.ExporterEnqueueFailedSpans + enqueueFailedMeasure = or.TelemetryBuilder.ExporterEnqueueFailedSpans case component.DataTypeMetrics: - enqueueFailedMeasure = or.telemetryBuilder.ExporterEnqueueFailedMetricPoints + enqueueFailedMeasure = or.TelemetryBuilder.ExporterEnqueueFailedMetricPoints case component.DataTypeLogs: - enqueueFailedMeasure = or.telemetryBuilder.ExporterEnqueueFailedLogRecords + enqueueFailedMeasure = or.TelemetryBuilder.ExporterEnqueueFailedLogRecords } enqueueFailedMeasure.Add(ctx, failed, metric.WithAttributes(or.otelAttrs...)) diff --git a/exporter/exporterhelper/obsexporter_test.go 
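For orientation, a minimal sketch of how the relocated helper is meant to be driven: create an ObsReport with NewExporter, open an operation before pushing data, and close it with the item count and the push error. The settings, data type, and push function here are illustrative, and the import only resolves from inside the exporterhelper module because the package is internal.

package example

import (
	"context"

	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/exporter"
	"go.opentelemetry.io/collector/exporter/exporterhelper/internal"
	"go.opentelemetry.io/collector/pdata/ptrace"
)

// exportWithObservability records sent/failed span counts and span events around a single export.
func exportWithObservability(ctx context.Context, set exporter.Settings, td ptrace.Traces,
	push func(context.Context, ptrace.Traces) error) error {
	obsrep, err := internal.NewExporter(internal.ObsReportSettings{
		ExporterID:             set.ID,
		ExporterCreateSettings: set,
		DataType:               component.DataTypeTraces,
	})
	if err != nil {
		return err
	}
	ctx = obsrep.StartTracesOp(ctx)
	err = push(ctx, td)
	obsrep.EndTracesOp(ctx, td.SpanCount(), err)
	return err
}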
b/exporter/exporterhelper/internal/obsexporter_test.go similarity index 73% rename from exporter/exporterhelper/obsexporter_test.go rename to exporter/exporterhelper/internal/obsexporter_test.go index 802068983f0..3f15259c659 100644 --- a/exporter/exporterhelper/obsexporter_test.go +++ b/exporter/exporterhelper/internal/obsexporter_test.go @@ -1,7 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package exporterhelper // import "go.opentelemetry.io/collector/exporter/exporterhelper" +package internal import ( "context" @@ -16,7 +16,6 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/exporter" - "go.opentelemetry.io/collector/exporter/exporterhelper/internal" ) var ( @@ -30,9 +29,9 @@ func TestExportTraceDataOp(t *testing.T) { parentCtx, parentSpan := tt.TelemetrySettings().TracerProvider.Tracer("test").Start(context.Background(), t.Name()) defer parentSpan.End() - obsrep, err := newExporter(obsReportSettings{ - exporterID: exporterID, - exporterCreateSettings: exporter.Settings{ID: exporterID, TelemetrySettings: tt.TelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo()}, + obsrep, err := NewExporter(ObsReportSettings{ + ExporterID: exporterID, + ExporterCreateSettings: exporter.Settings{ID: exporterID, TelemetrySettings: tt.TelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo()}, }) require.NoError(t, err) @@ -41,9 +40,9 @@ func TestExportTraceDataOp(t *testing.T) { {items: 14, err: errFake}, } for i := range params { - ctx := obsrep.startTracesOp(parentCtx) + ctx := obsrep.StartTracesOp(parentCtx) assert.NotNil(t, ctx) - obsrep.endTracesOp(ctx, params[i].items, params[i].err) + obsrep.EndTracesOp(ctx, params[i].items, params[i].err) } spans := tt.SpanRecorder.Ended() @@ -55,13 +54,13 @@ func TestExportTraceDataOp(t *testing.T) { switch { case params[i].err == nil: sentSpans += params[i].items - require.Contains(t, span.Attributes(), attribute.KeyValue{Key: internal.SentSpansKey, Value: attribute.Int64Value(int64(params[i].items))}) - require.Contains(t, span.Attributes(), attribute.KeyValue{Key: internal.FailedToSendSpansKey, Value: attribute.Int64Value(0)}) + require.Contains(t, span.Attributes(), attribute.KeyValue{Key: SentSpansKey, Value: attribute.Int64Value(int64(params[i].items))}) + require.Contains(t, span.Attributes(), attribute.KeyValue{Key: FailedToSendSpansKey, Value: attribute.Int64Value(0)}) assert.Equal(t, codes.Unset, span.Status().Code) case errors.Is(params[i].err, errFake): failedToSendSpans += params[i].items - require.Contains(t, span.Attributes(), attribute.KeyValue{Key: internal.SentSpansKey, Value: attribute.Int64Value(0)}) - require.Contains(t, span.Attributes(), attribute.KeyValue{Key: internal.FailedToSendSpansKey, Value: attribute.Int64Value(int64(params[i].items))}) + require.Contains(t, span.Attributes(), attribute.KeyValue{Key: SentSpansKey, Value: attribute.Int64Value(0)}) + require.Contains(t, span.Attributes(), attribute.KeyValue{Key: FailedToSendSpansKey, Value: attribute.Int64Value(int64(params[i].items))}) assert.Equal(t, codes.Error, span.Status().Code) assert.Equal(t, params[i].err.Error(), span.Status().Description) default: @@ -78,9 +77,9 @@ func TestExportMetricsOp(t *testing.T) { parentCtx, parentSpan := tt.TelemetrySettings().TracerProvider.Tracer("test").Start(context.Background(), t.Name()) defer parentSpan.End() - obsrep, err := newExporter(obsReportSettings{ - exporterID: exporterID, - 
exporterCreateSettings: exporter.Settings{ID: exporterID, TelemetrySettings: tt.TelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo()}, + obsrep, err := NewExporter(ObsReportSettings{ + ExporterID: exporterID, + ExporterCreateSettings: exporter.Settings{ID: exporterID, TelemetrySettings: tt.TelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo()}, }) require.NoError(t, err) @@ -89,10 +88,10 @@ func TestExportMetricsOp(t *testing.T) { {items: 23, err: errFake}, } for i := range params { - ctx := obsrep.startMetricsOp(parentCtx) + ctx := obsrep.StartMetricsOp(parentCtx) assert.NotNil(t, ctx) - obsrep.endMetricsOp(ctx, params[i].items, params[i].err) + obsrep.EndMetricsOp(ctx, params[i].items, params[i].err) } spans := tt.SpanRecorder.Ended() @@ -104,13 +103,13 @@ func TestExportMetricsOp(t *testing.T) { switch { case params[i].err == nil: sentMetricPoints += params[i].items - require.Contains(t, span.Attributes(), attribute.KeyValue{Key: internal.SentMetricPointsKey, Value: attribute.Int64Value(int64(params[i].items))}) - require.Contains(t, span.Attributes(), attribute.KeyValue{Key: internal.FailedToSendMetricPointsKey, Value: attribute.Int64Value(0)}) + require.Contains(t, span.Attributes(), attribute.KeyValue{Key: SentMetricPointsKey, Value: attribute.Int64Value(int64(params[i].items))}) + require.Contains(t, span.Attributes(), attribute.KeyValue{Key: FailedToSendMetricPointsKey, Value: attribute.Int64Value(0)}) assert.Equal(t, codes.Unset, span.Status().Code) case errors.Is(params[i].err, errFake): failedToSendMetricPoints += params[i].items - require.Contains(t, span.Attributes(), attribute.KeyValue{Key: internal.SentMetricPointsKey, Value: attribute.Int64Value(0)}) - require.Contains(t, span.Attributes(), attribute.KeyValue{Key: internal.FailedToSendMetricPointsKey, Value: attribute.Int64Value(int64(params[i].items))}) + require.Contains(t, span.Attributes(), attribute.KeyValue{Key: SentMetricPointsKey, Value: attribute.Int64Value(0)}) + require.Contains(t, span.Attributes(), attribute.KeyValue{Key: FailedToSendMetricPointsKey, Value: attribute.Int64Value(int64(params[i].items))}) assert.Equal(t, codes.Error, span.Status().Code) assert.Equal(t, params[i].err.Error(), span.Status().Description) default: @@ -127,9 +126,9 @@ func TestExportLogsOp(t *testing.T) { parentCtx, parentSpan := tt.TelemetrySettings().TracerProvider.Tracer("test").Start(context.Background(), t.Name()) defer parentSpan.End() - obsrep, err := newExporter(obsReportSettings{ - exporterID: exporterID, - exporterCreateSettings: exporter.Settings{ID: exporterID, TelemetrySettings: tt.TelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo()}, + obsrep, err := NewExporter(ObsReportSettings{ + ExporterID: exporterID, + ExporterCreateSettings: exporter.Settings{ID: exporterID, TelemetrySettings: tt.TelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo()}, }) require.NoError(t, err) @@ -138,10 +137,10 @@ func TestExportLogsOp(t *testing.T) { {items: 23, err: errFake}, } for i := range params { - ctx := obsrep.startLogsOp(parentCtx) + ctx := obsrep.StartLogsOp(parentCtx) assert.NotNil(t, ctx) - obsrep.endLogsOp(ctx, params[i].items, params[i].err) + obsrep.EndLogsOp(ctx, params[i].items, params[i].err) } spans := tt.SpanRecorder.Ended() @@ -153,13 +152,13 @@ func TestExportLogsOp(t *testing.T) { switch { case params[i].err == nil: sentLogRecords += params[i].items - require.Contains(t, span.Attributes(), attribute.KeyValue{Key: internal.SentLogRecordsKey, Value: 
attribute.Int64Value(int64(params[i].items))}) - require.Contains(t, span.Attributes(), attribute.KeyValue{Key: internal.FailedToSendLogRecordsKey, Value: attribute.Int64Value(0)}) + require.Contains(t, span.Attributes(), attribute.KeyValue{Key: SentLogRecordsKey, Value: attribute.Int64Value(int64(params[i].items))}) + require.Contains(t, span.Attributes(), attribute.KeyValue{Key: FailedToSendLogRecordsKey, Value: attribute.Int64Value(0)}) assert.Equal(t, codes.Unset, span.Status().Code) case errors.Is(params[i].err, errFake): failedToSendLogRecords += params[i].items - require.Contains(t, span.Attributes(), attribute.KeyValue{Key: internal.SentLogRecordsKey, Value: attribute.Int64Value(0)}) - require.Contains(t, span.Attributes(), attribute.KeyValue{Key: internal.FailedToSendLogRecordsKey, Value: attribute.Int64Value(int64(params[i].items))}) + require.Contains(t, span.Attributes(), attribute.KeyValue{Key: SentLogRecordsKey, Value: attribute.Int64Value(0)}) + require.Contains(t, span.Attributes(), attribute.KeyValue{Key: FailedToSendLogRecordsKey, Value: attribute.Int64Value(int64(params[i].items))}) assert.Equal(t, codes.Error, span.Status().Code) assert.Equal(t, params[i].err.Error(), span.Status().Description) default: @@ -176,14 +175,14 @@ func TestCheckExporterTracesViews(t *testing.T) { require.NoError(t, err) t.Cleanup(func() { require.NoError(t, tt.Shutdown(context.Background())) }) - obsrep, err := newExporter(obsReportSettings{ - exporterID: exporterID, - exporterCreateSettings: exporter.Settings{ID: exporterID, TelemetrySettings: tt.TelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo()}, + obsrep, err := NewExporter(ObsReportSettings{ + ExporterID: exporterID, + ExporterCreateSettings: exporter.Settings{ID: exporterID, TelemetrySettings: tt.TelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo()}, }) require.NoError(t, err) - ctx := obsrep.startTracesOp(context.Background()) + ctx := obsrep.StartTracesOp(context.Background()) require.NotNil(t, ctx) - obsrep.endTracesOp(ctx, 7, nil) + obsrep.EndTracesOp(ctx, 7, nil) assert.NoError(t, tt.CheckExporterTraces(7, 0)) assert.Error(t, tt.CheckExporterTraces(7, 7)) @@ -196,14 +195,14 @@ func TestCheckExporterMetricsViews(t *testing.T) { require.NoError(t, err) t.Cleanup(func() { require.NoError(t, tt.Shutdown(context.Background())) }) - obsrep, err := newExporter(obsReportSettings{ - exporterID: exporterID, - exporterCreateSettings: exporter.Settings{ID: exporterID, TelemetrySettings: tt.TelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo()}, + obsrep, err := NewExporter(ObsReportSettings{ + ExporterID: exporterID, + ExporterCreateSettings: exporter.Settings{ID: exporterID, TelemetrySettings: tt.TelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo()}, }) require.NoError(t, err) - ctx := obsrep.startMetricsOp(context.Background()) + ctx := obsrep.StartMetricsOp(context.Background()) require.NotNil(t, ctx) - obsrep.endMetricsOp(ctx, 7, nil) + obsrep.EndMetricsOp(ctx, 7, nil) assert.NoError(t, tt.CheckExporterMetrics(7, 0)) assert.Error(t, tt.CheckExporterMetrics(7, 7)) @@ -216,14 +215,14 @@ func TestCheckExporterLogsViews(t *testing.T) { require.NoError(t, err) t.Cleanup(func() { require.NoError(t, tt.Shutdown(context.Background())) }) - obsrep, err := newExporter(obsReportSettings{ - exporterID: exporterID, - exporterCreateSettings: exporter.Settings{ID: exporterID, TelemetrySettings: tt.TelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo()}, + obsrep, err := NewExporter(ObsReportSettings{ 
+ ExporterID: exporterID, + ExporterCreateSettings: exporter.Settings{ID: exporterID, TelemetrySettings: tt.TelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo()}, }) require.NoError(t, err) - ctx := obsrep.startLogsOp(context.Background()) + ctx := obsrep.StartLogsOp(context.Background()) require.NotNil(t, ctx) - obsrep.endLogsOp(ctx, 7, nil) + obsrep.EndLogsOp(ctx, 7, nil) assert.NoError(t, tt.CheckExporterLogs(7, 0)) assert.Error(t, tt.CheckExporterLogs(7, 7)) diff --git a/exporter/exporterhelper/internal/queue_sender.go b/exporter/exporterhelper/internal/queue_sender.go new file mode 100644 index 00000000000..60a94966336 --- /dev/null +++ b/exporter/exporterhelper/internal/queue_sender.go @@ -0,0 +1,154 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/collector/exporter/exporterhelper/internal" + +import ( + "context" + "errors" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/trace" + "go.uber.org/multierr" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/exporter/exporterqueue" + "go.opentelemetry.io/collector/exporter/internal" + "go.opentelemetry.io/collector/exporter/internal/queue" +) + +const defaultQueueSize = 1000 + +// Deprecated: [v0.110.0] Use QueueConfig instead. +type QueueSettings = QueueConfig + +// QueueConfig defines configuration for queueing batches before sending to the consumerSender. +type QueueConfig struct { + // Enabled indicates whether to not enqueue batches before sending to the consumerSender. + Enabled bool `mapstructure:"enabled"` + // NumConsumers is the number of consumers from the queue. Defaults to 10. + // If batching is enabled, a combined batch cannot contain more requests than the number of consumers. + // So it's recommended to set higher number of consumers if batching is enabled. + NumConsumers int `mapstructure:"num_consumers"` + // QueueSize is the maximum number of batches allowed in queue at a given time. + QueueSize int `mapstructure:"queue_size"` + // StorageID if not empty, enables the persistent storage and uses the component specified + // as a storage extension for the persistent queue + StorageID *component.ID `mapstructure:"storage"` +} + +// Deprecated: [v0.110.0] Use NewDefaultQueueConfig instead. +func NewDefaultQueueSettings() QueueSettings { + return NewDefaultQueueConfig() +} + +// NewDefaultQueueConfig returns the default config for QueueConfig. 
+func NewDefaultQueueConfig() QueueConfig { + return QueueConfig{ + Enabled: true, + NumConsumers: 10, + // By default, batches are 8192 spans, for a total of up to 8 million spans in the queue + // This can be estimated at 1-4 GB worth of maximum memory usage + // This default is probably still too high, and may be adjusted further down in a future release + QueueSize: defaultQueueSize, + } +} + +// Validate checks if the QueueConfig configuration is valid +func (qCfg *QueueConfig) Validate() error { + if !qCfg.Enabled { + return nil + } + + if qCfg.QueueSize <= 0 { + return errors.New("queue size must be positive") + } + + if qCfg.NumConsumers <= 0 { + return errors.New("number of queue consumers must be positive") + } + + return nil +} + +type QueueSender struct { + BaseRequestSender + queue exporterqueue.Queue[internal.Request] + numConsumers int + traceAttribute attribute.KeyValue + consumers *queue.Consumers[internal.Request] + + obsrep *ObsReport + exporterID component.ID +} + +func NewQueueSender(q exporterqueue.Queue[internal.Request], set exporter.Settings, numConsumers int, + exportFailureMessage string, obsrep *ObsReport) *QueueSender { + qs := &QueueSender{ + queue: q, + numConsumers: numConsumers, + traceAttribute: attribute.String(ExporterKey, set.ID.String()), + obsrep: obsrep, + exporterID: set.ID, + } + consumeFunc := func(ctx context.Context, req internal.Request) error { + err := qs.NextSender.Send(ctx, req) + if err != nil { + set.Logger.Error("Exporting failed. Dropping data."+exportFailureMessage, + zap.Error(err), zap.Int("dropped_items", req.ItemsCount())) + } + return err + } + qs.consumers = queue.NewQueueConsumers[internal.Request](q, numConsumers, consumeFunc) + return qs +} + +// Start is invoked during service startup. +func (qs *QueueSender) Start(ctx context.Context, host component.Host) error { + if err := qs.consumers.Start(ctx, host); err != nil { + return err + } + + dataTypeAttr := attribute.String(DataTypeKey, qs.obsrep.DataType.String()) + return multierr.Append( + qs.obsrep.TelemetryBuilder.InitExporterQueueSize(func() int64 { return int64(qs.queue.Size()) }, + metric.WithAttributeSet(attribute.NewSet(qs.traceAttribute, dataTypeAttr))), + qs.obsrep.TelemetryBuilder.InitExporterQueueCapacity(func() int64 { return int64(qs.queue.Capacity()) }, + metric.WithAttributeSet(attribute.NewSet(qs.traceAttribute))), + ) +} + +// Shutdown is invoked during service shutdown. +func (qs *QueueSender) Shutdown(ctx context.Context) error { + // Stop the queue and consumers, this will drain the queue and will call the retry (which is stopped) that will only + // try once every request. + return qs.consumers.Shutdown(ctx) +} + +// send implements the requestSender interface. It puts the request in the queue. +func (qs *QueueSender) Send(ctx context.Context, req internal.Request) error { + // Prevent cancellation and deadline to propagate to the context stored in the queue. + // The grpc/http based receivers will cancel the request context after this function returns. 
+ c := context.WithoutCancel(ctx) + + span := trace.SpanFromContext(c) + if err := qs.queue.Offer(c, req); err != nil { + span.AddEvent("Failed to enqueue item.", trace.WithAttributes(qs.traceAttribute)) + return err + } + + span.AddEvent("Enqueued item.", trace.WithAttributes(qs.traceAttribute)) + return nil +} + +type MockHost struct { + component.Host + Ext map[component.ID]component.Component +} + +func (nh *MockHost) GetExtensions() map[component.ID]component.Component { + return nh.Ext +} diff --git a/exporter/exporterhelper/queue_sender_test.go b/exporter/exporterhelper/internal/queue_sender_test.go similarity index 77% rename from exporter/exporterhelper/queue_sender_test.go rename to exporter/exporterhelper/internal/queue_sender_test.go index 08484ad0496..fcd43d6ff0f 100644 --- a/exporter/exporterhelper/queue_sender_test.go +++ b/exporter/exporterhelper/internal/queue_sender_test.go @@ -1,7 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package exporterhelper +package internal import ( "context" @@ -19,9 +19,9 @@ import ( "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/config/configretry" "go.opentelemetry.io/collector/exporter" - "go.opentelemetry.io/collector/exporter/exporterhelper/internal" "go.opentelemetry.io/collector/exporter/exporterqueue" "go.opentelemetry.io/collector/exporter/exportertest" + "go.opentelemetry.io/collector/exporter/internal" "go.opentelemetry.io/collector/exporter/internal/queue" ) @@ -29,45 +29,45 @@ func TestQueuedRetry_StopWhileWaiting(t *testing.T) { qCfg := NewDefaultQueueConfig() qCfg.NumConsumers = 1 rCfg := configretry.NewDefaultBackOffConfig() - be, err := newBaseExporter(defaultSettings, defaultDataType, newObservabilityConsumerSender, - withMarshaler(mockRequestMarshaler), withUnmarshaler(mockRequestUnmarshaler(&mockRequest{})), + be, err := NewBaseExporter(defaultSettings, defaultDataType, newObservabilityConsumerSender, + WithMarshaler(mockRequestMarshaler), WithUnmarshaler(mockRequestUnmarshaler(&mockRequest{})), WithRetry(rCfg), WithQueue(qCfg)) require.NoError(t, err) - ocs := be.obsrepSender.(*observabilityConsumerSender) + ocs := be.ObsrepSender.(*observabilityConsumerSender) require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) firstMockR := newErrorRequest() ocs.run(func() { // This is asynchronous so it should just enqueue, no errors expected. - require.NoError(t, be.send(context.Background(), firstMockR)) + require.NoError(t, be.Send(context.Background(), firstMockR)) }) // Enqueue another request to ensure when calling shutdown we drain the queue. secondMockR := newMockRequest(3, nil) ocs.run(func() { // This is asynchronous so it should just enqueue, no errors expected. 
- require.NoError(t, be.send(context.Background(), secondMockR)) + require.NoError(t, be.Send(context.Background(), secondMockR)) }) - require.LessOrEqual(t, 1, be.queueSender.(*queueSender).queue.Size()) + require.LessOrEqual(t, 1, be.QueueSender.(*QueueSender).queue.Size()) assert.NoError(t, be.Shutdown(context.Background())) secondMockR.checkNumRequests(t, 1) ocs.checkSendItemsCount(t, 3) ocs.checkDroppedItemsCount(t, 7) - require.Zero(t, be.queueSender.(*queueSender).queue.Size()) + require.Zero(t, be.QueueSender.(*QueueSender).queue.Size()) } func TestQueuedRetry_DoNotPreserveCancellation(t *testing.T) { qCfg := NewDefaultQueueConfig() qCfg.NumConsumers = 1 rCfg := configretry.NewDefaultBackOffConfig() - be, err := newBaseExporter(defaultSettings, defaultDataType, newObservabilityConsumerSender, - withMarshaler(mockRequestMarshaler), withUnmarshaler(mockRequestUnmarshaler(&mockRequest{})), + be, err := NewBaseExporter(defaultSettings, defaultDataType, newObservabilityConsumerSender, + WithMarshaler(mockRequestMarshaler), WithUnmarshaler(mockRequestUnmarshaler(&mockRequest{})), WithRetry(rCfg), WithQueue(qCfg)) require.NoError(t, err) - ocs := be.obsrepSender.(*observabilityConsumerSender) + ocs := be.ObsrepSender.(*observabilityConsumerSender) require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) t.Cleanup(func() { assert.NoError(t, be.Shutdown(context.Background())) @@ -78,14 +78,14 @@ func TestQueuedRetry_DoNotPreserveCancellation(t *testing.T) { mockR := newMockRequest(2, nil) ocs.run(func() { // This is asynchronous so it should just enqueue, no errors expected. - require.NoError(t, be.send(ctx, mockR)) + require.NoError(t, be.Send(ctx, mockR)) }) ocs.awaitAsyncProcessing() mockR.checkNumRequests(t, 1) ocs.checkSendItemsCount(t, 2) ocs.checkDroppedItemsCount(t, 0) - require.Zero(t, be.queueSender.(*queueSender).queue.Size()) + require.Zero(t, be.QueueSender.(*QueueSender).queue.Size()) } func TestQueuedRetry_RejectOnFull(t *testing.T) { @@ -95,15 +95,15 @@ func TestQueuedRetry_RejectOnFull(t *testing.T) { set := exportertest.NewNopSettings() logger, observed := observer.New(zap.ErrorLevel) set.Logger = zap.New(logger) - be, err := newBaseExporter(set, defaultDataType, newNoopObsrepSender, - withMarshaler(mockRequestMarshaler), withUnmarshaler(mockRequestUnmarshaler(&mockRequest{})), + be, err := NewBaseExporter(set, defaultDataType, newNoopObsrepSender, + WithMarshaler(mockRequestMarshaler), WithUnmarshaler(mockRequestUnmarshaler(&mockRequest{})), WithQueue(qCfg)) require.NoError(t, err) require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) t.Cleanup(func() { assert.NoError(t, be.Shutdown(context.Background())) }) - require.Error(t, be.send(context.Background(), newMockRequest(2, nil))) + require.Error(t, be.Send(context.Background(), newMockRequest(2, nil))) assert.Len(t, observed.All(), 1) assert.Equal(t, "Exporting failed. 
Rejecting data.", observed.All()[0].Message) assert.Equal(t, "sending queue is full", observed.All()[0].ContextMap()["error"]) @@ -117,8 +117,8 @@ func TestQueuedRetryHappyPath(t *testing.T) { { name: "WithQueue", queueOptions: []Option{ - withMarshaler(mockRequestMarshaler), - withUnmarshaler(mockRequestUnmarshaler(&mockRequest{})), + WithMarshaler(mockRequestMarshaler), + WithUnmarshaler(mockRequestUnmarshaler(&mockRequest{})), WithQueue(QueueConfig{ Enabled: true, QueueSize: 10, @@ -134,7 +134,7 @@ func TestQueuedRetryHappyPath(t *testing.T) { Enabled: true, QueueSize: 10, NumConsumers: 1, - }, exporterqueue.NewMemoryQueueFactory[Request]()), + }, exporterqueue.NewMemoryQueueFactory[internal.Request]()), WithRetry(configretry.NewDefaultBackOffConfig()), }, }, @@ -145,7 +145,7 @@ func TestQueuedRetryHappyPath(t *testing.T) { Enabled: true, QueueSize: 10, NumConsumers: 1, - }, exporterqueue.NewPersistentQueueFactory[Request](nil, exporterqueue.PersistentQueueSettings[Request]{})), + }, exporterqueue.NewPersistentQueueFactory[internal.Request](nil, exporterqueue.PersistentQueueSettings[internal.Request]{})), WithRetry(configretry.NewDefaultBackOffConfig()), }, }, @@ -156,7 +156,7 @@ func TestQueuedRetryHappyPath(t *testing.T) { Enabled: true, QueueSize: 10, NumConsumers: 1, - }, exporterqueue.NewPersistentQueueFactory[Request](nil, exporterqueue.PersistentQueueSettings[Request]{})), + }, exporterqueue.NewPersistentQueueFactory[internal.Request](nil, exporterqueue.PersistentQueueSettings[internal.Request]{})), WithRetry(configretry.NewDefaultBackOffConfig()), }, }, @@ -168,9 +168,9 @@ func TestQueuedRetryHappyPath(t *testing.T) { t.Cleanup(func() { require.NoError(t, tel.Shutdown(context.Background())) }) set := exporter.Settings{ID: defaultID, TelemetrySettings: tel.TelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo()} - be, err := newBaseExporter(set, defaultDataType, newObservabilityConsumerSender, tt.queueOptions...) + be, err := NewBaseExporter(set, defaultDataType, newObservabilityConsumerSender, tt.queueOptions...) 
require.NoError(t, err) - ocs := be.obsrepSender.(*observabilityConsumerSender) + ocs := be.ObsrepSender.(*observabilityConsumerSender) wantRequests := 10 reqs := make([]*mockRequest, 0, 10) @@ -178,12 +178,12 @@ func TestQueuedRetryHappyPath(t *testing.T) { ocs.run(func() { req := newMockRequest(2, nil) reqs = append(reqs, req) - require.NoError(t, be.send(context.Background(), req)) + require.NoError(t, be.Send(context.Background(), req)) }) } // expect queue to be full - require.Error(t, be.send(context.Background(), newMockRequest(2, nil))) + require.Error(t, be.Send(context.Background(), newMockRequest(2, nil))) require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) t.Cleanup(func() { @@ -214,8 +214,8 @@ func TestQueuedRetry_QueueMetricsReported(t *testing.T) { qCfg.NumConsumers = 0 // to make every request go straight to the queue rCfg := configretry.NewDefaultBackOffConfig() set := exporter.Settings{ID: defaultID, TelemetrySettings: tt.TelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo()} - be, err := newBaseExporter(set, dataType, newObservabilityConsumerSender, - withMarshaler(mockRequestMarshaler), withUnmarshaler(mockRequestUnmarshaler(&mockRequest{})), + be, err := NewBaseExporter(set, dataType, newObservabilityConsumerSender, + WithMarshaler(mockRequestMarshaler), WithUnmarshaler(mockRequestUnmarshaler(&mockRequest{})), WithRetry(rCfg), WithQueue(qCfg)) require.NoError(t, err) require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) @@ -223,10 +223,10 @@ func TestQueuedRetry_QueueMetricsReported(t *testing.T) { require.NoError(t, tt.CheckExporterMetricGauge("otelcol_exporter_queue_capacity", int64(defaultQueueSize))) for i := 0; i < 7; i++ { - require.NoError(t, be.send(context.Background(), newErrorRequest())) + require.NoError(t, be.Send(context.Background(), newErrorRequest())) } require.NoError(t, tt.CheckExporterMetricGauge("otelcol_exporter_queue_size", int64(7), - attribute.String(internal.DataTypeKey, dataType.String()))) + attribute.String(DataTypeKey, dataType.String()))) assert.NoError(t, be.Shutdown(context.Background())) } @@ -273,8 +273,8 @@ func TestQueueRetryWithDisabledQueue(t *testing.T) { { name: "WithQueue", queueOptions: []Option{ - withMarshaler(mockRequestMarshaler), - withUnmarshaler(mockRequestUnmarshaler(&mockRequest{})), + WithMarshaler(mockRequestMarshaler), + WithUnmarshaler(mockRequestUnmarshaler(&mockRequest{})), func() Option { qs := NewDefaultQueueConfig() qs.Enabled = false @@ -288,7 +288,7 @@ func TestQueueRetryWithDisabledQueue(t *testing.T) { func() Option { qs := exporterqueue.NewDefaultConfig() qs.Enabled = false - return WithRequestQueue(qs, exporterqueue.NewMemoryQueueFactory[Request]()) + return WithRequestQueue(qs, exporterqueue.NewMemoryQueueFactory[internal.Request]()) }(), }, }, @@ -299,13 +299,13 @@ func TestQueueRetryWithDisabledQueue(t *testing.T) { set := exportertest.NewNopSettings() logger, observed := observer.New(zap.ErrorLevel) set.Logger = zap.New(logger) - be, err := newBaseExporter(set, component.DataTypeLogs, newObservabilityConsumerSender, tt.queueOptions...) + be, err := NewBaseExporter(set, component.DataTypeLogs, newObservabilityConsumerSender, tt.queueOptions...) 
require.NoError(t, err) require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) - ocs := be.obsrepSender.(*observabilityConsumerSender) + ocs := be.ObsrepSender.(*observabilityConsumerSender) mockR := newMockRequest(2, errors.New("some error")) ocs.run(func() { - require.Error(t, be.send(context.Background(), mockR)) + require.Error(t, be.Send(context.Background(), mockR)) }) assert.Len(t, observed.All(), 1) assert.Equal(t, "Exporting failed. Rejecting data. Try enabling sending_queue to survive temporary failures.", observed.All()[0].Message) @@ -323,12 +323,12 @@ func TestQueueFailedRequestDropped(t *testing.T) { set := exportertest.NewNopSettings() logger, observed := observer.New(zap.ErrorLevel) set.Logger = zap.New(logger) - be, err := newBaseExporter(set, component.DataTypeLogs, newNoopObsrepSender, - WithRequestQueue(exporterqueue.NewDefaultConfig(), exporterqueue.NewMemoryQueueFactory[Request]())) + be, err := NewBaseExporter(set, component.DataTypeLogs, newNoopObsrepSender, + WithRequestQueue(exporterqueue.NewDefaultConfig(), exporterqueue.NewMemoryQueueFactory[internal.Request]())) require.NoError(t, err) require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) mockR := newMockRequest(2, errors.New("some error")) - require.NoError(t, be.send(context.Background(), mockR)) + require.NoError(t, be.Send(context.Background(), mockR)) require.NoError(t, be.Shutdown(context.Background())) mockR.checkNumRequests(t, 1) assert.Len(t, observed.All(), 1) @@ -345,15 +345,15 @@ func TestQueuedRetryPersistenceEnabled(t *testing.T) { qCfg.StorageID = &storageID // enable persistence rCfg := configretry.NewDefaultBackOffConfig() set := exporter.Settings{ID: defaultID, TelemetrySettings: tt.TelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo()} - be, err := newBaseExporter(set, defaultDataType, newObservabilityConsumerSender, - withMarshaler(mockRequestMarshaler), withUnmarshaler(mockRequestUnmarshaler(&mockRequest{})), + be, err := NewBaseExporter(set, defaultDataType, newObservabilityConsumerSender, + WithMarshaler(mockRequestMarshaler), WithUnmarshaler(mockRequestUnmarshaler(&mockRequest{})), WithRetry(rCfg), WithQueue(qCfg)) require.NoError(t, err) var extensions = map[component.ID]component.Component{ storageID: queue.NewMockStorageExtension(nil), } - host := &mockHost{ext: extensions} + host := &MockHost{Ext: extensions} // we start correctly with a file storage extension require.NoError(t, be.Start(context.Background(), host)) @@ -371,14 +371,14 @@ func TestQueuedRetryPersistenceEnabledStorageError(t *testing.T) { qCfg.StorageID = &storageID // enable persistence rCfg := configretry.NewDefaultBackOffConfig() set := exporter.Settings{ID: defaultID, TelemetrySettings: tt.TelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo()} - be, err := newBaseExporter(set, defaultDataType, newObservabilityConsumerSender, withMarshaler(mockRequestMarshaler), - withUnmarshaler(mockRequestUnmarshaler(&mockRequest{})), WithRetry(rCfg), WithQueue(qCfg)) + be, err := NewBaseExporter(set, defaultDataType, newObservabilityConsumerSender, WithMarshaler(mockRequestMarshaler), + WithUnmarshaler(mockRequestUnmarshaler(&mockRequest{})), WithRetry(rCfg), WithQueue(qCfg)) require.NoError(t, err) var extensions = map[component.ID]component.Component{ storageID: queue.NewMockStorageExtension(storageError), } - host := &mockHost{ext: extensions} + host := &MockHost{Ext: extensions} // we fail to start if we get an error creating the storage client 
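The persistence tests above rely on QueueConfig.StorageID: pointing it at a storage extension makes the queue durable across restarts. A sketch under the assumption that an extension with the chosen ID is configured on the host; the name "file_storage" is only an example.

package example

import (
	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/exporter/exporterhelper/internal"
)

// persistentQueueConfig enables the sending queue and binds it to a storage
// extension so queued batches survive a collector restart.
func persistentQueueConfig() internal.QueueConfig {
	storageID := component.MustNewID("file_storage") // illustrative extension ID
	qCfg := internal.NewDefaultQueueConfig()
	qCfg.StorageID = &storageID
	return qCfg
}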
require.Error(t, be.Start(context.Background(), host), "could not get storage client") @@ -395,23 +395,23 @@ func TestQueuedRetryPersistentEnabled_NoDataLossOnShutdown(t *testing.T) { rCfg.MaxElapsedTime = 0 // retry infinitely so shutdown can be triggered mockReq := newErrorRequest() - be, err := newBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender, withMarshaler(mockRequestMarshaler), - withUnmarshaler(mockRequestUnmarshaler(mockReq)), WithRetry(rCfg), WithQueue(qCfg)) + be, err := NewBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender, WithMarshaler(mockRequestMarshaler), + WithUnmarshaler(mockRequestUnmarshaler(mockReq)), WithRetry(rCfg), WithQueue(qCfg)) require.NoError(t, err) var extensions = map[component.ID]component.Component{ storageID: queue.NewMockStorageExtension(nil), } - host := &mockHost{ext: extensions} + host := &MockHost{Ext: extensions} require.NoError(t, be.Start(context.Background(), host)) // Invoke queuedRetrySender so the producer will put the item for consumer to poll - require.NoError(t, be.send(context.Background(), mockReq)) + require.NoError(t, be.Send(context.Background(), mockReq)) // first wait for the item to be consumed from the queue assert.Eventually(t, func() bool { - return be.queueSender.(*queueSender).queue.Size() == 0 + return be.QueueSender.(*QueueSender).queue.Size() == 0 }, time.Second, 1*time.Millisecond) // shuts down the exporter, unsent data should be preserved as in-flight data in the persistent queue. @@ -419,8 +419,8 @@ func TestQueuedRetryPersistentEnabled_NoDataLossOnShutdown(t *testing.T) { // start the exporter again replacing the preserved mockRequest in the unmarshaler with a new one that doesn't fail. replacedReq := newMockRequest(1, nil) - be, err = newBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender, withMarshaler(mockRequestMarshaler), - withUnmarshaler(mockRequestUnmarshaler(replacedReq)), WithRetry(rCfg), WithQueue(qCfg)) + be, err = NewBaseExporter(defaultSettings, defaultDataType, newNoopObsrepSender, WithMarshaler(mockRequestMarshaler), + WithUnmarshaler(mockRequestUnmarshaler(replacedReq)), WithRetry(rCfg), WithQueue(qCfg)) require.NoError(t, err) require.NoError(t, be.Start(context.Background(), host)) t.Cleanup(func() { require.NoError(t, be.Shutdown(context.Background())) }) @@ -430,22 +430,13 @@ func TestQueuedRetryPersistentEnabled_NoDataLossOnShutdown(t *testing.T) { } func TestQueueSenderNoStartShutdown(t *testing.T) { - queue := queue.NewBoundedMemoryQueue[Request](queue.MemoryQueueSettings[Request]{}) + queue := queue.NewBoundedMemoryQueue[internal.Request](queue.MemoryQueueSettings[internal.Request]{}) set := exportertest.NewNopSettings() - obsrep, err := newExporter(obsReportSettings{ - exporterID: exporterID, - exporterCreateSettings: exportertest.NewNopSettings(), + obsrep, err := NewExporter(ObsReportSettings{ + ExporterID: exporterID, + ExporterCreateSettings: exportertest.NewNopSettings(), }) assert.NoError(t, err) - qs := newQueueSender(queue, set, 1, "", obsrep) + qs := NewQueueSender(queue, set, 1, "", obsrep) assert.NoError(t, qs.Shutdown(context.Background())) } - -type mockHost struct { - component.Host - ext map[component.ID]component.Component -} - -func (nh *mockHost) GetExtensions() map[component.ID]component.Component { - return nh.ext -} diff --git a/exporter/exporterhelper/request_test.go b/exporter/exporterhelper/internal/request.go similarity index 67% rename from exporter/exporterhelper/request_test.go rename to 
exporter/exporterhelper/internal/request.go index 5f48b2b674f..266ce05c241 100644 --- a/exporter/exporterhelper/request_test.go +++ b/exporter/exporterhelper/internal/request.go @@ -1,7 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package exporterhelper +package internal // import "go.opentelemetry.io/collector/exporter/exporterhelper/internal" import ( "context" @@ -9,6 +9,7 @@ import ( "time" "go.opentelemetry.io/collector/exporter/exporterbatcher" + "go.opentelemetry.io/collector/exporter/internal" "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/pdata/pmetric" "go.opentelemetry.io/collector/pdata/pprofile" @@ -55,7 +56,7 @@ func (r *fakeRequest) ItemsCount() int { return r.items } -func fakeBatchMergeFunc(_ context.Context, r1 Request, r2 Request) (Request, error) { +func fakeBatchMergeFunc(_ context.Context, r1 internal.Request, r2 internal.Request) (internal.Request, error) { if r1 == nil { return r2, nil } @@ -72,11 +73,11 @@ func fakeBatchMergeFunc(_ context.Context, r1 Request, r2 Request) (Request, err }, nil } -func fakeBatchMergeSplitFunc(ctx context.Context, cfg exporterbatcher.MaxSizeConfig, r1 Request, r2 Request) ([]Request, error) { +func fakeBatchMergeSplitFunc(ctx context.Context, cfg exporterbatcher.MaxSizeConfig, r1 internal.Request, r2 internal.Request) ([]internal.Request, error) { maxItems := cfg.MaxSizeItems if maxItems == 0 { r, err := fakeBatchMergeFunc(ctx, r1, r2) - return []Request{r}, err + return []internal.Request{r}, err } if r2.(*fakeRequest).mergeErr != nil { @@ -85,7 +86,7 @@ func fakeBatchMergeSplitFunc(ctx context.Context, cfg exporterbatcher.MaxSizeCon fr2 := r2.(*fakeRequest) fr2 = &fakeRequest{items: fr2.items, sink: fr2.sink, exportErr: fr2.exportErr, delay: fr2.delay} - var res []Request + var res []internal.Request // fill fr1 to maxItems if it's not nil if r1 != nil { @@ -96,7 +97,7 @@ func fakeBatchMergeSplitFunc(ctx context.Context, cfg exporterbatcher.MaxSizeCon if fr2.exportErr != nil { fr1.exportErr = fr2.exportErr } - return []Request{fr1}, nil + return []internal.Request{fr1}, nil } // if split is needed, we don't propagate exportErr from fr2 to fr1 to test more cases fr2.items -= maxItems - fr1.items @@ -117,26 +118,26 @@ func fakeBatchMergeSplitFunc(ctx context.Context, cfg exporterbatcher.MaxSizeCon return res, nil } -type fakeRequestConverter struct { - metricsError error - tracesError error - logsError error - profilesError error - requestError error +type FakeRequestConverter struct { + MetricsError error + TracesError error + LogsError error + ProfilesError error + RequestError error } -func (frc *fakeRequestConverter) requestFromMetricsFunc(_ context.Context, md pmetric.Metrics) (Request, error) { - return &fakeRequest{items: md.DataPointCount(), exportErr: frc.requestError}, frc.metricsError +func (frc *FakeRequestConverter) RequestFromMetricsFunc(_ context.Context, md pmetric.Metrics) (internal.Request, error) { + return &fakeRequest{items: md.DataPointCount(), exportErr: frc.RequestError}, frc.MetricsError } -func (frc *fakeRequestConverter) requestFromTracesFunc(_ context.Context, md ptrace.Traces) (Request, error) { - return &fakeRequest{items: md.SpanCount(), exportErr: frc.requestError}, frc.tracesError +func (frc *FakeRequestConverter) RequestFromTracesFunc(_ context.Context, md ptrace.Traces) (internal.Request, error) { + return &fakeRequest{items: md.SpanCount(), exportErr: frc.RequestError}, frc.TracesError } -func (frc *fakeRequestConverter) 
requestFromLogsFunc(_ context.Context, md plog.Logs) (Request, error) { - return &fakeRequest{items: md.LogRecordCount(), exportErr: frc.requestError}, frc.logsError +func (frc *FakeRequestConverter) RequestFromLogsFunc(_ context.Context, md plog.Logs) (internal.Request, error) { + return &fakeRequest{items: md.LogRecordCount(), exportErr: frc.RequestError}, frc.LogsError } -func (frc *fakeRequestConverter) requestFromProfilesFunc(_ context.Context, md pprofile.Profiles) (Request, error) { - return &fakeRequest{items: md.SampleCount(), exportErr: frc.requestError}, frc.profilesError +func (frc *FakeRequestConverter) RequestFromProfilesFunc(_ context.Context, md pprofile.Profiles) (internal.Request, error) { + return &fakeRequest{items: md.SampleCount(), exportErr: frc.RequestError}, frc.ProfilesError } diff --git a/exporter/exporterhelper/internal/request_sender.go b/exporter/exporterhelper/internal/request_sender.go new file mode 100644 index 00000000000..683aca40d79 --- /dev/null +++ b/exporter/exporterhelper/internal/request_sender.go @@ -0,0 +1,33 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/collector/exporter/exporterhelper/internal" + +import ( + "context" // RequestSender is an abstraction of a sender for a request independent of the type of the data (traces, metrics, logs). + + "go.opentelemetry.io/collector/component" + "go.opentelemetry.io/collector/exporter/internal" +) + +type RequestSender interface { + component.Component + Send(context.Context, internal.Request) error + SetNextSender(nextSender RequestSender) +} + +type BaseRequestSender struct { + component.StartFunc + component.ShutdownFunc + NextSender RequestSender +} + +var _ RequestSender = (*BaseRequestSender)(nil) + +func (b *BaseRequestSender) Send(ctx context.Context, req internal.Request) error { + return b.NextSender.Send(ctx, req) +} + +func (b *BaseRequestSender) SetNextSender(nextSender RequestSender) { + b.NextSender = nextSender +} diff --git a/exporter/exporterhelper/internal/retry_sender.go b/exporter/exporterhelper/internal/retry_sender.go new file mode 100644 index 00000000000..c6648785183 --- /dev/null +++ b/exporter/exporterhelper/internal/retry_sender.go @@ -0,0 +1,142 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/collector/exporter/exporterhelper/internal" + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/cenkalti/backoff/v4" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" + "go.uber.org/zap" + + "go.opentelemetry.io/collector/config/configretry" + "go.opentelemetry.io/collector/consumer/consumererror" + "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/exporter/internal" + "go.opentelemetry.io/collector/exporter/internal/experr" +) + +// TODO: Clean this by forcing all exporters to return an internal error type that always include the information about retries. +type throttleRetry struct { + err error + delay time.Duration +} + +func (t throttleRetry) Error() string { + return "Throttle (" + t.delay.String() + "), error: " + t.err.Error() +} + +func (t throttleRetry) Unwrap() error { + return t.err +} + +// NewThrottleRetry creates a new throttle retry error. 
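The RequestSender/BaseRequestSender pair above is the hook the helper uses to chain its queue, retry, and timeout stages. A hedged sketch of a custom pass-through stage follows; the counting logic is invented for illustration and, like the other internal types, this only compiles inside the exporterhelper module.

package example

import (
	"context"

	"go.opentelemetry.io/collector/exporter/exporterhelper/internal"
	exporterinternal "go.opentelemetry.io/collector/exporter/internal"
)

// countingSender counts items and forwards every request to the next sender in the chain.
type countingSender struct {
	internal.BaseRequestSender
	items int
}

func (cs *countingSender) Send(ctx context.Context, req exporterinternal.Request) error {
	cs.items += req.ItemsCount()
	return cs.NextSender.Send(ctx, req)
}

// link places cs in front of next, mirroring how the helper wires its stages.
func link(cs *countingSender, next internal.RequestSender) {
	cs.SetNextSender(next)
}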
+func NewThrottleRetry(err error, delay time.Duration) error { + return throttleRetry{ + err: err, + delay: delay, + } +} + +type retrySender struct { + BaseRequestSender + traceAttribute attribute.KeyValue + cfg configretry.BackOffConfig + stopCh chan struct{} + logger *zap.Logger +} + +func newRetrySender(config configretry.BackOffConfig, set exporter.Settings) *retrySender { + return &retrySender{ + traceAttribute: attribute.String(ExporterKey, set.ID.String()), + cfg: config, + stopCh: make(chan struct{}), + logger: set.Logger, + } +} + +func (rs *retrySender) Shutdown(context.Context) error { + close(rs.stopCh) + return nil +} + +// send implements the requestSender interface +func (rs *retrySender) Send(ctx context.Context, req internal.Request) error { + // Do not use NewExponentialBackOff since it calls Reset and the code here must + // call Reset after changing the InitialInterval (this saves an unnecessary call to Now). + expBackoff := backoff.ExponentialBackOff{ + InitialInterval: rs.cfg.InitialInterval, + RandomizationFactor: rs.cfg.RandomizationFactor, + Multiplier: rs.cfg.Multiplier, + MaxInterval: rs.cfg.MaxInterval, + MaxElapsedTime: rs.cfg.MaxElapsedTime, + Stop: backoff.Stop, + Clock: backoff.SystemClock, + } + expBackoff.Reset() + span := trace.SpanFromContext(ctx) + retryNum := int64(0) + for { + span.AddEvent( + "Sending request.", + trace.WithAttributes(rs.traceAttribute, attribute.Int64("retry_num", retryNum))) + + err := rs.NextSender.Send(ctx, req) + if err == nil { + return nil + } + + // Immediately drop data on permanent errors. + if consumererror.IsPermanent(err) { + return fmt.Errorf("not retryable error: %w", err) + } + + req = internal.ExtractPartialRequest(req, err) + + backoffDelay := expBackoff.NextBackOff() + if backoffDelay == backoff.Stop { + return fmt.Errorf("no more retries left: %w", err) + } + + throttleErr := throttleRetry{} + if errors.As(err, &throttleErr) { + backoffDelay = max(backoffDelay, throttleErr.delay) + } + + backoffDelayStr := backoffDelay.String() + span.AddEvent( + "Exporting failed. Will retry the request after interval.", + trace.WithAttributes( + rs.traceAttribute, + attribute.String("interval", backoffDelayStr), + attribute.String("error", err.Error()))) + rs.logger.Info( + "Exporting failed. Will retry the request after interval.", + zap.Error(err), + zap.String("interval", backoffDelayStr), + ) + retryNum++ + + // back-off, but get interrupted when shutting down or request is cancelled or timed out. + select { + case <-ctx.Done(): + return fmt.Errorf("request is cancelled or timed out %w", err) + case <-rs.stopCh: + return experr.NewShutdownErr(err) + case <-time.After(backoffDelay): + } + } +} + +// max returns the larger of x or y. 
+func max(x, y time.Duration) time.Duration { + if x < y { + return y + } + return x +} diff --git a/exporter/exporterhelper/retry_sender_test.go b/exporter/exporterhelper/internal/retry_sender_test.go similarity index 78% rename from exporter/exporterhelper/retry_sender_test.go rename to exporter/exporterhelper/internal/retry_sender_test.go index a0dab1f8782..f4cc0f5ee0b 100644 --- a/exporter/exporterhelper/retry_sender_test.go +++ b/exporter/exporterhelper/internal/retry_sender_test.go @@ -1,7 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package exporterhelper +package internal import ( "context" @@ -22,16 +22,17 @@ import ( "go.opentelemetry.io/collector/consumer/consumererror" "go.opentelemetry.io/collector/exporter/exporterqueue" "go.opentelemetry.io/collector/exporter/exportertest" + "go.opentelemetry.io/collector/exporter/internal" "go.opentelemetry.io/collector/pdata/testdata" ) -func mockRequestUnmarshaler(mr Request) exporterqueue.Unmarshaler[Request] { - return func([]byte) (Request, error) { +func mockRequestUnmarshaler(mr internal.Request) exporterqueue.Unmarshaler[internal.Request] { + return func([]byte) (internal.Request, error) { return mr, nil } } -func mockRequestMarshaler(Request) ([]byte, error) { +func mockRequestMarshaler(internal.Request) ([]byte, error) { return []byte("mockRequest"), nil } @@ -39,10 +40,10 @@ func TestQueuedRetry_DropOnPermanentError(t *testing.T) { qCfg := NewDefaultQueueConfig() rCfg := configretry.NewDefaultBackOffConfig() mockR := newMockRequest(2, consumererror.NewPermanent(errors.New("bad data"))) - be, err := newBaseExporter(defaultSettings, defaultDataType, newObservabilityConsumerSender, - withMarshaler(mockRequestMarshaler), withUnmarshaler(mockRequestUnmarshaler(mockR)), WithRetry(rCfg), WithQueue(qCfg)) + be, err := NewBaseExporter(defaultSettings, defaultDataType, newObservabilityConsumerSender, + WithMarshaler(mockRequestMarshaler), WithUnmarshaler(mockRequestUnmarshaler(mockR)), WithRetry(rCfg), WithQueue(qCfg)) require.NoError(t, err) - ocs := be.obsrepSender.(*observabilityConsumerSender) + ocs := be.ObsrepSender.(*observabilityConsumerSender) require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) t.Cleanup(func() { assert.NoError(t, be.Shutdown(context.Background())) @@ -50,7 +51,7 @@ func TestQueuedRetry_DropOnPermanentError(t *testing.T) { ocs.run(func() { // This is asynchronous so it should just enqueue, no errors expected. 
- require.NoError(t, be.send(context.Background(), mockR)) + require.NoError(t, be.Send(context.Background(), mockR)) }) ocs.awaitAsyncProcessing() // In the newMockConcurrentExporter we count requests and items even for failed requests @@ -63,11 +64,11 @@ func TestQueuedRetry_DropOnNoRetry(t *testing.T) { qCfg := NewDefaultQueueConfig() rCfg := configretry.NewDefaultBackOffConfig() rCfg.Enabled = false - be, err := newBaseExporter(defaultSettings, defaultDataType, newObservabilityConsumerSender, withMarshaler(mockRequestMarshaler), - withUnmarshaler(mockRequestUnmarshaler(newMockRequest(2, errors.New("transient error")))), + be, err := NewBaseExporter(defaultSettings, defaultDataType, newObservabilityConsumerSender, WithMarshaler(mockRequestMarshaler), + WithUnmarshaler(mockRequestUnmarshaler(newMockRequest(2, errors.New("transient error")))), WithQueue(qCfg), WithRetry(rCfg)) require.NoError(t, err) - ocs := be.obsrepSender.(*observabilityConsumerSender) + ocs := be.ObsrepSender.(*observabilityConsumerSender) require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) t.Cleanup(func() { assert.NoError(t, be.Shutdown(context.Background())) @@ -76,7 +77,7 @@ func TestQueuedRetry_DropOnNoRetry(t *testing.T) { mockR := newMockRequest(2, errors.New("transient error")) ocs.run(func() { // This is asynchronous so it should just enqueue, no errors expected. - require.NoError(t, be.send(context.Background(), mockR)) + require.NoError(t, be.Send(context.Background(), mockR)) }) ocs.awaitAsyncProcessing() // In the newMockConcurrentExporter we count requests and items even for failed requests @@ -90,8 +91,8 @@ func TestQueuedRetry_OnError(t *testing.T) { qCfg.NumConsumers = 1 rCfg := configretry.NewDefaultBackOffConfig() rCfg.InitialInterval = 0 - be, err := newBaseExporter(defaultSettings, defaultDataType, newObservabilityConsumerSender, - withMarshaler(mockRequestMarshaler), withUnmarshaler(mockRequestUnmarshaler(&mockRequest{})), + be, err := NewBaseExporter(defaultSettings, defaultDataType, newObservabilityConsumerSender, + WithMarshaler(mockRequestMarshaler), WithUnmarshaler(mockRequestUnmarshaler(&mockRequest{})), WithRetry(rCfg), WithQueue(qCfg)) require.NoError(t, err) require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) @@ -101,10 +102,10 @@ func TestQueuedRetry_OnError(t *testing.T) { traceErr := consumererror.NewTraces(errors.New("some error"), testdata.GenerateTraces(1)) mockR := newMockRequest(2, traceErr) - ocs := be.obsrepSender.(*observabilityConsumerSender) + ocs := be.ObsrepSender.(*observabilityConsumerSender) ocs.run(func() { // This is asynchronous so it should just enqueue, no errors expected. 
- require.NoError(t, be.send(context.Background(), mockR)) + require.NoError(t, be.Send(context.Background(), mockR)) }) ocs.awaitAsyncProcessing() @@ -120,11 +121,11 @@ func TestQueuedRetry_MaxElapsedTime(t *testing.T) { rCfg := configretry.NewDefaultBackOffConfig() rCfg.InitialInterval = time.Millisecond rCfg.MaxElapsedTime = 100 * time.Millisecond - be, err := newBaseExporter(defaultSettings, defaultDataType, newObservabilityConsumerSender, - withMarshaler(mockRequestMarshaler), withUnmarshaler(mockRequestUnmarshaler(&mockRequest{})), + be, err := NewBaseExporter(defaultSettings, defaultDataType, newObservabilityConsumerSender, + WithMarshaler(mockRequestMarshaler), WithUnmarshaler(mockRequestUnmarshaler(&mockRequest{})), WithRetry(rCfg), WithQueue(qCfg)) require.NoError(t, err) - ocs := be.obsrepSender.(*observabilityConsumerSender) + ocs := be.ObsrepSender.(*observabilityConsumerSender) require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) t.Cleanup(func() { assert.NoError(t, be.Shutdown(context.Background())) @@ -132,14 +133,14 @@ func TestQueuedRetry_MaxElapsedTime(t *testing.T) { ocs.run(func() { // Add an item that will always fail. - require.NoError(t, be.send(context.Background(), newErrorRequest())) + require.NoError(t, be.Send(context.Background(), newErrorRequest())) }) mockR := newMockRequest(2, nil) start := time.Now() ocs.run(func() { // This is asynchronous so it should just enqueue, no errors expected. - require.NoError(t, be.send(context.Background(), mockR)) + require.NoError(t, be.Send(context.Background(), mockR)) }) ocs.awaitAsyncProcessing() @@ -152,7 +153,7 @@ func TestQueuedRetry_MaxElapsedTime(t *testing.T) { mockR.checkNumRequests(t, 1) ocs.checkSendItemsCount(t, 2) ocs.checkDroppedItemsCount(t, 7) - require.Zero(t, be.queueSender.(*queueSender).queue.Size()) + require.Zero(t, be.QueueSender.(*QueueSender).queue.Size()) } type wrappedError struct { @@ -168,11 +169,11 @@ func TestQueuedRetry_ThrottleError(t *testing.T) { qCfg.NumConsumers = 1 rCfg := configretry.NewDefaultBackOffConfig() rCfg.InitialInterval = 10 * time.Millisecond - be, err := newBaseExporter(defaultSettings, defaultDataType, newObservabilityConsumerSender, - withMarshaler(mockRequestMarshaler), withUnmarshaler(mockRequestUnmarshaler(&mockRequest{})), + be, err := NewBaseExporter(defaultSettings, defaultDataType, newObservabilityConsumerSender, + WithMarshaler(mockRequestMarshaler), WithUnmarshaler(mockRequestUnmarshaler(&mockRequest{})), WithRetry(rCfg), WithQueue(qCfg)) require.NoError(t, err) - ocs := be.obsrepSender.(*observabilityConsumerSender) + ocs := be.ObsrepSender.(*observabilityConsumerSender) require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) t.Cleanup(func() { assert.NoError(t, be.Shutdown(context.Background())) @@ -183,7 +184,7 @@ func TestQueuedRetry_ThrottleError(t *testing.T) { start := time.Now() ocs.run(func() { // This is asynchronous so it should just enqueue, no errors expected. 
- require.NoError(t, be.send(context.Background(), mockR)) + require.NoError(t, be.Send(context.Background(), mockR)) }) ocs.awaitAsyncProcessing() @@ -193,7 +194,7 @@ func TestQueuedRetry_ThrottleError(t *testing.T) { mockR.checkNumRequests(t, 2) ocs.checkSendItemsCount(t, 2) ocs.checkDroppedItemsCount(t, 0) - require.Zero(t, be.queueSender.(*queueSender).queue.Size()) + require.Zero(t, be.QueueSender.(*QueueSender).queue.Size()) } func TestQueuedRetry_RetryOnError(t *testing.T) { @@ -202,11 +203,11 @@ func TestQueuedRetry_RetryOnError(t *testing.T) { qCfg.QueueSize = 1 rCfg := configretry.NewDefaultBackOffConfig() rCfg.InitialInterval = 0 - be, err := newBaseExporter(defaultSettings, defaultDataType, newObservabilityConsumerSender, - withMarshaler(mockRequestMarshaler), withUnmarshaler(mockRequestUnmarshaler(&mockRequest{})), + be, err := NewBaseExporter(defaultSettings, defaultDataType, newObservabilityConsumerSender, + WithMarshaler(mockRequestMarshaler), WithUnmarshaler(mockRequestUnmarshaler(&mockRequest{})), WithRetry(rCfg), WithQueue(qCfg)) require.NoError(t, err) - ocs := be.obsrepSender.(*observabilityConsumerSender) + ocs := be.ObsrepSender.(*observabilityConsumerSender) require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) t.Cleanup(func() { assert.NoError(t, be.Shutdown(context.Background())) @@ -215,7 +216,7 @@ func TestQueuedRetry_RetryOnError(t *testing.T) { mockR := newMockRequest(2, errors.New("transient error")) ocs.run(func() { // This is asynchronous so it should just enqueue, no errors expected. - require.NoError(t, be.send(context.Background(), mockR)) + require.NoError(t, be.Send(context.Background(), mockR)) }) ocs.awaitAsyncProcessing() @@ -223,19 +224,19 @@ func TestQueuedRetry_RetryOnError(t *testing.T) { mockR.checkNumRequests(t, 2) ocs.checkSendItemsCount(t, 2) ocs.checkDroppedItemsCount(t, 0) - require.Zero(t, be.queueSender.(*queueSender).queue.Size()) + require.Zero(t, be.QueueSender.(*QueueSender).queue.Size()) } func TestQueueRetryWithNoQueue(t *testing.T) { rCfg := configretry.NewDefaultBackOffConfig() rCfg.MaxElapsedTime = time.Nanosecond // fail fast - be, err := newBaseExporter(exportertest.NewNopSettings(), component.DataTypeLogs, newObservabilityConsumerSender, WithRetry(rCfg)) + be, err := NewBaseExporter(exportertest.NewNopSettings(), component.DataTypeLogs, newObservabilityConsumerSender, WithRetry(rCfg)) require.NoError(t, err) require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) - ocs := be.obsrepSender.(*observabilityConsumerSender) + ocs := be.ObsrepSender.(*observabilityConsumerSender) mockR := newMockRequest(2, errors.New("some error")) ocs.run(func() { - require.Error(t, be.send(context.Background(), mockR)) + require.Error(t, be.Send(context.Background(), mockR)) }) ocs.awaitAsyncProcessing() mockR.checkNumRequests(t, 1) @@ -250,13 +251,13 @@ func TestQueueRetryWithDisabledRetires(t *testing.T) { set := exportertest.NewNopSettings() logger, observed := observer.New(zap.ErrorLevel) set.Logger = zap.New(logger) - be, err := newBaseExporter(set, component.DataTypeLogs, newObservabilityConsumerSender, WithRetry(rCfg)) + be, err := NewBaseExporter(set, component.DataTypeLogs, newObservabilityConsumerSender, WithRetry(rCfg)) require.NoError(t, err) require.NoError(t, be.Start(context.Background(), componenttest.NewNopHost())) - ocs := be.obsrepSender.(*observabilityConsumerSender) + ocs := be.ObsrepSender.(*observabilityConsumerSender) mockR := newMockRequest(2, errors.New("some error")) 
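Worth calling out from retry_sender.go: when the downstream error carries a throttle hint, the next backoff interval is raised to at least the server-requested delay. A sketch of how a push function could surface that hint; the status text and delay are placeholders.

package example

import (
	"errors"
	"time"

	"go.opentelemetry.io/collector/exporter/exporterhelper/internal"
)

// pushThrottled wraps a backend rejection so the retry sender waits at least
// `delay` before the next attempt instead of using only its own backoff.
func pushThrottled(delay time.Duration) error {
	backendErr := errors.New("server returned 429 Too Many Requests")
	return internal.NewThrottleRetry(backendErr, delay)
}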
ocs.run(func() { - require.Error(t, be.send(context.Background(), mockR)) + require.Error(t, be.Send(context.Background(), mockR)) }) assert.Len(t, observed.All(), 1) assert.Equal(t, "Exporting failed. Rejecting data. "+ @@ -274,7 +275,7 @@ func (mer *mockErrorRequest) Export(context.Context) error { return errors.New("transient error") } -func (mer *mockErrorRequest) OnError(error) Request { +func (mer *mockErrorRequest) OnError(error) internal.Request { return mer } @@ -282,7 +283,7 @@ func (mer *mockErrorRequest) ItemsCount() int { return 7 } -func newErrorRequest() Request { +func newErrorRequest() internal.Request { return &mockErrorRequest{} } @@ -306,7 +307,7 @@ func (m *mockRequest) Export(ctx context.Context) error { return ctx.Err() } -func (m *mockRequest) OnError(error) Request { +func (m *mockRequest) OnError(error) internal.Request { return &mockRequest{ cnt: 1, consumeError: nil, @@ -333,13 +334,13 @@ func newMockRequest(cnt int, consumeError error) *mockRequest { } type observabilityConsumerSender struct { - baseRequestSender + BaseRequestSender waitGroup *sync.WaitGroup sentItemsCount *atomic.Int64 droppedItemsCount *atomic.Int64 } -func newObservabilityConsumerSender(*obsReport) requestSender { +func newObservabilityConsumerSender(*ObsReport) RequestSender { return &observabilityConsumerSender{ waitGroup: new(sync.WaitGroup), droppedItemsCount: &atomic.Int64{}, @@ -347,8 +348,8 @@ func newObservabilityConsumerSender(*obsReport) requestSender { } } -func (ocs *observabilityConsumerSender) send(ctx context.Context, req Request) error { - err := ocs.nextSender.send(ctx, req) +func (ocs *observabilityConsumerSender) Send(ctx context.Context, req internal.Request) error { + err := ocs.NextSender.Send(ctx, req) if err != nil { ocs.droppedItemsCount.Add(int64(req.ItemsCount())) } else { diff --git a/exporter/exporterhelper/internal/timeout_sender.go b/exporter/exporterhelper/internal/timeout_sender.go new file mode 100644 index 00000000000..5abae1b6746 --- /dev/null +++ b/exporter/exporterhelper/internal/timeout_sender.go @@ -0,0 +1,52 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/collector/exporter/exporterhelper/internal" + +import ( + "context" + "errors" + "time" + + "go.opentelemetry.io/collector/exporter/internal" +) + +// TimeoutConfig for timeout. The timeout applies to individual attempts to send data to the backend. +type TimeoutConfig struct { + // Timeout is the timeout for every attempt to send data to the backend. + // A zero timeout means no timeout. + Timeout time.Duration `mapstructure:"timeout"` +} + +func (ts *TimeoutConfig) Validate() error { + // Negative timeouts are not acceptable, since all sends will fail. + if ts.Timeout < 0 { + return errors.New("'timeout' must be non-negative") + } + return nil +} + +// NewDefaultTimeoutConfig returns the default config for TimeoutConfig. +func NewDefaultTimeoutConfig() TimeoutConfig { + return TimeoutConfig{ + Timeout: 5 * time.Second, + } +} + +// TimeoutSender is a requestSender that adds a `timeout` to every request that passes this sender. +type TimeoutSender struct { + BaseRequestSender + cfg TimeoutConfig +} + +func (ts *TimeoutSender) Send(ctx context.Context, req internal.Request) error { + // TODO: Remove this by avoiding to create the timeout sender if timeout is 0. 
+ if ts.cfg.Timeout == 0 { + return req.Export(ctx) + } + // Intentionally don't overwrite the context inside the request, because in case of retries deadline will not be + // updated because this deadline most likely is before the next one. + tCtx, cancelFunc := context.WithTimeout(ctx, ts.cfg.Timeout) + defer cancelFunc() + return req.Export(tCtx) +} diff --git a/exporter/exporterhelper/timeout_sender_test.go b/exporter/exporterhelper/internal/timeout_sender_test.go similarity index 95% rename from exporter/exporterhelper/timeout_sender_test.go rename to exporter/exporterhelper/internal/timeout_sender_test.go index a0dac589821..bc838ef7bc2 100644 --- a/exporter/exporterhelper/timeout_sender_test.go +++ b/exporter/exporterhelper/internal/timeout_sender_test.go @@ -1,7 +1,7 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package exporterhelper +package internal import ( "testing" diff --git a/exporter/exporterhelper/logs.go b/exporter/exporterhelper/logs.go index 790ba188657..795bf91408e 100644 --- a/exporter/exporterhelper/logs.go +++ b/exporter/exporterhelper/logs.go @@ -13,6 +13,7 @@ import ( "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumererror" "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/exporter/exporterhelper/internal" "go.opentelemetry.io/collector/exporter/exporterqueue" "go.opentelemetry.io/collector/exporter/internal/queue" "go.opentelemetry.io/collector/pdata/plog" @@ -64,7 +65,7 @@ func (req *logsRequest) ItemsCount() int { } type logsExporter struct { - *baseExporter + *internal.BaseExporter consumer.Logs } @@ -83,8 +84,8 @@ func NewLogsExporter( return nil, errNilPushLogsData } logsOpts := []Option{ - withMarshaler(logsRequestMarshaler), withUnmarshaler(newLogsRequestUnmarshalerFunc(pusher)), - withBatchFuncs(mergeLogs, mergeSplitLogs), + internal.WithMarshaler(logsRequestMarshaler), internal.WithUnmarshaler(newLogsRequestUnmarshalerFunc(pusher)), + internal.WithBatchFuncs(mergeLogs, mergeSplitLogs), } return NewLogsRequestExporter(ctx, set, requestFromLogs(pusher), append(logsOpts, options...)...) } @@ -118,7 +119,7 @@ func NewLogsRequestExporter( return nil, errNilLogsConverter } - be, err := newBaseExporter(set, component.DataTypeLogs, newLogsExporterWithObservability, options...) + be, err := internal.NewBaseExporter(set, component.DataTypeLogs, newLogsExporterWithObservability, options...) if err != nil { return nil, err } @@ -131,32 +132,32 @@ func NewLogsRequestExporter( zap.Error(err)) return consumererror.NewPermanent(cErr) } - sErr := be.send(ctx, req) + sErr := be.Send(ctx, req) if errors.Is(sErr, queue.ErrQueueIsFull) { - be.obsrep.recordEnqueueFailure(ctx, component.DataTypeLogs, int64(req.ItemsCount())) + be.Obsrep.RecordEnqueueFailure(ctx, component.DataTypeLogs, int64(req.ItemsCount())) } return sErr - }, be.consumerOptions...) + }, be.ConsumerOptions...) 
return &logsExporter{ - baseExporter: be, + BaseExporter: be, Logs: lc, }, err } type logsExporterWithObservability struct { - baseRequestSender - obsrep *obsReport + internal.BaseRequestSender + obsrep *internal.ObsReport } -func newLogsExporterWithObservability(obsrep *obsReport) requestSender { +func newLogsExporterWithObservability(obsrep *internal.ObsReport) internal.RequestSender { return &logsExporterWithObservability{obsrep: obsrep} } -func (lewo *logsExporterWithObservability) send(ctx context.Context, req Request) error { - c := lewo.obsrep.startLogsOp(ctx) +func (lewo *logsExporterWithObservability) Send(ctx context.Context, req Request) error { + c := lewo.obsrep.StartLogsOp(ctx) numLogRecords := req.ItemsCount() - err := lewo.nextSender.send(c, req) - lewo.obsrep.endLogsOp(c, numLogRecords, err) + err := lewo.NextSender.Send(c, req) + lewo.obsrep.EndLogsOp(c, numLogRecords, err) return err } diff --git a/exporter/exporterhelper/logs_test.go b/exporter/exporterhelper/logs_test.go index e42a40159d0..03ce231a2c9 100644 --- a/exporter/exporterhelper/logs_test.go +++ b/exporter/exporterhelper/logs_test.go @@ -65,7 +65,7 @@ func TestLogsExporter_NilLogger(t *testing.T) { } func TestLogsRequestExporter_NilLogger(t *testing.T) { - le, err := NewLogsRequestExporter(context.Background(), exporter.Settings{}, (&fakeRequestConverter{}).requestFromLogsFunc) + le, err := NewLogsRequestExporter(context.Background(), exporter.Settings{}, (&internal.FakeRequestConverter{}).RequestFromLogsFunc) require.Nil(t, le) require.Equal(t, errNilLogger, err) } @@ -97,7 +97,7 @@ func TestLogsExporter_Default(t *testing.T) { func TestLogsRequestExporter_Default(t *testing.T) { ld := plog.NewLogs() le, err := NewLogsRequestExporter(context.Background(), exportertest.NewNopSettings(), - (&fakeRequestConverter{}).requestFromLogsFunc) + (&internal.FakeRequestConverter{}).RequestFromLogsFunc) assert.NotNil(t, le) assert.NoError(t, err) @@ -119,7 +119,7 @@ func TestLogsExporter_WithCapabilities(t *testing.T) { func TestLogsRequestExporter_WithCapabilities(t *testing.T) { capabilities := consumer.Capabilities{MutatesData: true} le, err := NewLogsRequestExporter(context.Background(), exportertest.NewNopSettings(), - (&fakeRequestConverter{}).requestFromLogsFunc, WithCapabilities(capabilities)) + (&internal.FakeRequestConverter{}).RequestFromLogsFunc, WithCapabilities(capabilities)) require.NoError(t, err) require.NotNil(t, le) @@ -139,7 +139,7 @@ func TestLogsRequestExporter_Default_ConvertError(t *testing.T) { ld := plog.NewLogs() want := errors.New("convert_error") le, err := NewLogsRequestExporter(context.Background(), exportertest.NewNopSettings(), - (&fakeRequestConverter{logsError: want}).requestFromLogsFunc) + (&internal.FakeRequestConverter{LogsError: want}).RequestFromLogsFunc) require.NoError(t, err) require.NotNil(t, le) require.Equal(t, consumererror.NewPermanent(want), le.ConsumeLogs(context.Background(), ld)) @@ -149,7 +149,7 @@ func TestLogsRequestExporter_Default_ExportError(t *testing.T) { ld := plog.NewLogs() want := errors.New("export_error") le, err := NewLogsRequestExporter(context.Background(), exportertest.NewNopSettings(), - (&fakeRequestConverter{requestError: want}).requestFromLogsFunc) + (&internal.FakeRequestConverter{RequestError: want}).RequestFromLogsFunc) require.NoError(t, err) require.NotNil(t, le) require.Equal(t, want, le.ConsumeLogs(context.Background(), ld)) @@ -166,7 +166,7 @@ func TestLogsExporter_WithPersistentQueue(t *testing.T) { te, err := 
NewLogsExporter(context.Background(), set, &fakeLogsExporterConfig, ts.ConsumeLogs, WithRetry(rCfg), WithQueue(qCfg)) require.NoError(t, err) - host := &mockHost{ext: map[component.ID]component.Component{ + host := &internal.MockHost{Ext: map[component.ID]component.Component{ storageID: queue.NewMockStorageExtension(nil), }} require.NoError(t, te.Start(context.Background(), host)) @@ -213,7 +213,7 @@ func TestLogsRequestExporter_WithRecordMetrics(t *testing.T) { le, err := NewLogsRequestExporter(context.Background(), exporter.Settings{ID: fakeLogsExporterName, TelemetrySettings: tt.TelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo()}, - (&fakeRequestConverter{}).requestFromLogsFunc) + (&internal.FakeRequestConverter{}).RequestFromLogsFunc) require.NoError(t, err) require.NotNil(t, le) @@ -240,7 +240,7 @@ func TestLogsRequestExporter_WithRecordMetrics_ExportError(t *testing.T) { t.Cleanup(func() { require.NoError(t, tt.Shutdown(context.Background())) }) le, err := NewLogsRequestExporter(context.Background(), exporter.Settings{ID: fakeLogsExporterName, TelemetrySettings: tt.TelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo()}, - (&fakeRequestConverter{requestError: want}).requestFromLogsFunc) + (&internal.FakeRequestConverter{RequestError: want}).RequestFromLogsFunc) require.NoError(t, err) require.NotNil(t, le) @@ -292,7 +292,7 @@ func TestLogsRequestExporter_WithSpan(t *testing.T) { otel.SetTracerProvider(set.TracerProvider) defer otel.SetTracerProvider(nooptrace.NewTracerProvider()) - le, err := NewLogsRequestExporter(context.Background(), set, (&fakeRequestConverter{}).requestFromLogsFunc) + le, err := NewLogsRequestExporter(context.Background(), set, (&internal.FakeRequestConverter{}).RequestFromLogsFunc) require.NoError(t, err) require.NotNil(t, le) checkWrapSpanForLogsExporter(t, sr, set.TracerProvider.Tracer("test"), le, nil, 1) @@ -320,7 +320,7 @@ func TestLogsRequestExporter_WithSpan_ReturnError(t *testing.T) { defer otel.SetTracerProvider(nooptrace.NewTracerProvider()) want := errors.New("my_error") - le, err := NewLogsRequestExporter(context.Background(), set, (&fakeRequestConverter{requestError: want}).requestFromLogsFunc) + le, err := NewLogsRequestExporter(context.Background(), set, (&internal.FakeRequestConverter{RequestError: want}).RequestFromLogsFunc) require.NoError(t, err) require.NotNil(t, le) checkWrapSpanForLogsExporter(t, sr, set.TracerProvider.Tracer("test"), le, want, 1) @@ -343,7 +343,7 @@ func TestLogsRequestExporter_WithShutdown(t *testing.T) { shutdown := func(context.Context) error { shutdownCalled = true; return nil } le, err := NewLogsRequestExporter(context.Background(), exportertest.NewNopSettings(), - (&fakeRequestConverter{}).requestFromLogsFunc, WithShutdown(shutdown)) + (&internal.FakeRequestConverter{}).RequestFromLogsFunc, WithShutdown(shutdown)) assert.NotNil(t, le) assert.NoError(t, err) @@ -367,7 +367,7 @@ func TestLogsRequestExporter_WithShutdown_ReturnError(t *testing.T) { shutdownErr := func(context.Context) error { return want } le, err := NewLogsRequestExporter(context.Background(), exportertest.NewNopSettings(), - (&fakeRequestConverter{}).requestFromLogsFunc, WithShutdown(shutdownErr)) + (&internal.FakeRequestConverter{}).RequestFromLogsFunc, WithShutdown(shutdownErr)) assert.NotNil(t, le) assert.NoError(t, err) @@ -424,7 +424,7 @@ func checkWrapSpanForLogsExporter(t *testing.T, sr *tracetest.SpanRecorder, trac require.Equalf(t, fakeLogsParentSpanName, parentSpan.Name(), "SpanData %v", parentSpan) for _, sd := range 
gotSpanData[:numRequests] { require.Equalf(t, parentSpan.SpanContext(), sd.Parent(), "Exporter span not a child\nSpanData %v", sd) - checkStatus(t, sd, wantError) + internal.CheckStatus(t, sd, wantError) sentLogRecords := numLogRecords var failedToSendLogRecords int64 diff --git a/exporter/exporterhelper/metrics.go b/exporter/exporterhelper/metrics.go index 382c9b9ce8a..f78fc2fc972 100644 --- a/exporter/exporterhelper/metrics.go +++ b/exporter/exporterhelper/metrics.go @@ -13,6 +13,7 @@ import ( "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumererror" "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/exporter/exporterhelper/internal" "go.opentelemetry.io/collector/exporter/exporterqueue" "go.opentelemetry.io/collector/exporter/internal/queue" "go.opentelemetry.io/collector/pdata/pmetric" @@ -64,7 +65,7 @@ func (req *metricsRequest) ItemsCount() int { } type metricsExporter struct { - *baseExporter + *internal.BaseExporter consumer.Metrics } @@ -83,8 +84,8 @@ func NewMetricsExporter( return nil, errNilPushMetricsData } metricsOpts := []Option{ - withMarshaler(metricsRequestMarshaler), withUnmarshaler(newMetricsRequestUnmarshalerFunc(pusher)), - withBatchFuncs(mergeMetrics, mergeSplitMetrics), + internal.WithMarshaler(metricsRequestMarshaler), internal.WithUnmarshaler(newMetricsRequestUnmarshalerFunc(pusher)), + internal.WithBatchFuncs(mergeMetrics, mergeSplitMetrics), } return NewMetricsRequestExporter(ctx, set, requestFromMetrics(pusher), append(metricsOpts, options...)...) } @@ -118,7 +119,7 @@ func NewMetricsRequestExporter( return nil, errNilMetricsConverter } - be, err := newBaseExporter(set, component.DataTypeMetrics, newMetricsSenderWithObservability, options...) + be, err := internal.NewBaseExporter(set, component.DataTypeMetrics, newMetricsSenderWithObservability, options...) if err != nil { return nil, err } @@ -131,32 +132,32 @@ func NewMetricsRequestExporter( zap.Error(err)) return consumererror.NewPermanent(cErr) } - sErr := be.send(ctx, req) + sErr := be.Send(ctx, req) if errors.Is(sErr, queue.ErrQueueIsFull) { - be.obsrep.recordEnqueueFailure(ctx, component.DataTypeMetrics, int64(req.ItemsCount())) + be.Obsrep.RecordEnqueueFailure(ctx, component.DataTypeMetrics, int64(req.ItemsCount())) } return sErr - }, be.consumerOptions...) + }, be.ConsumerOptions...) 
return &metricsExporter{ - baseExporter: be, + BaseExporter: be, Metrics: mc, }, err } type metricsSenderWithObservability struct { - baseRequestSender - obsrep *obsReport + internal.BaseRequestSender + obsrep *internal.ObsReport } -func newMetricsSenderWithObservability(obsrep *obsReport) requestSender { +func newMetricsSenderWithObservability(obsrep *internal.ObsReport) internal.RequestSender { return &metricsSenderWithObservability{obsrep: obsrep} } -func (mewo *metricsSenderWithObservability) send(ctx context.Context, req Request) error { - c := mewo.obsrep.startMetricsOp(ctx) +func (mewo *metricsSenderWithObservability) Send(ctx context.Context, req Request) error { + c := mewo.obsrep.StartMetricsOp(ctx) numMetricDataPoints := req.ItemsCount() - err := mewo.nextSender.send(c, req) - mewo.obsrep.endMetricsOp(c, numMetricDataPoints, err) + err := mewo.NextSender.Send(c, req) + mewo.obsrep.EndMetricsOp(c, numMetricDataPoints, err) return err } diff --git a/exporter/exporterhelper/metrics_test.go b/exporter/exporterhelper/metrics_test.go index b97b2cdc650..1f90b02e42d 100644 --- a/exporter/exporterhelper/metrics_test.go +++ b/exporter/exporterhelper/metrics_test.go @@ -66,7 +66,7 @@ func TestMetricsExporter_NilLogger(t *testing.T) { func TestMetricsRequestExporter_NilLogger(t *testing.T) { me, err := NewMetricsRequestExporter(context.Background(), exporter.Settings{}, - (&fakeRequestConverter{}).requestFromMetricsFunc) + (&internal.FakeRequestConverter{}).RequestFromMetricsFunc) require.Nil(t, me) require.Equal(t, errNilLogger, err) } @@ -98,7 +98,7 @@ func TestMetricsExporter_Default(t *testing.T) { func TestMetricsRequestExporter_Default(t *testing.T) { md := pmetric.NewMetrics() me, err := NewMetricsRequestExporter(context.Background(), exportertest.NewNopSettings(), - (&fakeRequestConverter{}).requestFromMetricsFunc) + (&internal.FakeRequestConverter{}).RequestFromMetricsFunc) assert.NoError(t, err) assert.NotNil(t, me) @@ -120,7 +120,7 @@ func TestMetricsExporter_WithCapabilities(t *testing.T) { func TestMetricsRequestExporter_WithCapabilities(t *testing.T) { capabilities := consumer.Capabilities{MutatesData: true} me, err := NewMetricsRequestExporter(context.Background(), exportertest.NewNopSettings(), - (&fakeRequestConverter{}).requestFromMetricsFunc, WithCapabilities(capabilities)) + (&internal.FakeRequestConverter{}).RequestFromMetricsFunc, WithCapabilities(capabilities)) assert.NoError(t, err) assert.NotNil(t, me) @@ -140,7 +140,7 @@ func TestMetricsRequestExporter_Default_ConvertError(t *testing.T) { md := pmetric.NewMetrics() want := errors.New("convert_error") me, err := NewMetricsRequestExporter(context.Background(), exportertest.NewNopSettings(), - (&fakeRequestConverter{metricsError: want}).requestFromMetricsFunc) + (&internal.FakeRequestConverter{MetricsError: want}).RequestFromMetricsFunc) require.NoError(t, err) require.NotNil(t, me) require.Equal(t, consumererror.NewPermanent(want), me.ConsumeMetrics(context.Background(), md)) @@ -150,7 +150,7 @@ func TestMetricsRequestExporter_Default_ExportError(t *testing.T) { md := pmetric.NewMetrics() want := errors.New("export_error") me, err := NewMetricsRequestExporter(context.Background(), exportertest.NewNopSettings(), - (&fakeRequestConverter{requestError: want}).requestFromMetricsFunc) + (&internal.FakeRequestConverter{RequestError: want}).RequestFromMetricsFunc) require.NoError(t, err) require.NotNil(t, me) require.Equal(t, want, me.ConsumeMetrics(context.Background(), md)) @@ -167,7 +167,7 @@ func 
TestMetricsExporter_WithPersistentQueue(t *testing.T) { te, err := NewMetricsExporter(context.Background(), set, &fakeTracesExporterConfig, ms.ConsumeMetrics, WithRetry(rCfg), WithQueue(qCfg)) require.NoError(t, err) - host := &mockHost{ext: map[component.ID]component.Component{ + host := &internal.MockHost{Ext: map[component.ID]component.Component{ storageID: queue.NewMockStorageExtension(nil), }} require.NoError(t, te.Start(context.Background(), host)) @@ -214,7 +214,7 @@ func TestMetricsRequestExporter_WithRecordMetrics(t *testing.T) { me, err := NewMetricsRequestExporter(context.Background(), exporter.Settings{ID: fakeMetricsExporterName, TelemetrySettings: tt.TelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo()}, - (&fakeRequestConverter{}).requestFromMetricsFunc) + (&internal.FakeRequestConverter{}).RequestFromMetricsFunc) require.NoError(t, err) require.NotNil(t, me) @@ -242,7 +242,7 @@ func TestMetricsRequestExporter_WithRecordMetrics_ExportError(t *testing.T) { me, err := NewMetricsRequestExporter(context.Background(), exporter.Settings{ID: fakeMetricsExporterName, TelemetrySettings: tt.TelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo()}, - (&fakeRequestConverter{requestError: want}).requestFromMetricsFunc) + (&internal.FakeRequestConverter{RequestError: want}).RequestFromMetricsFunc) require.NoError(t, err) require.NotNil(t, me) @@ -294,7 +294,7 @@ func TestMetricsRequestExporter_WithSpan(t *testing.T) { otel.SetTracerProvider(set.TracerProvider) defer otel.SetTracerProvider(nooptrace.NewTracerProvider()) - me, err := NewMetricsRequestExporter(context.Background(), set, (&fakeRequestConverter{}).requestFromMetricsFunc) + me, err := NewMetricsRequestExporter(context.Background(), set, (&internal.FakeRequestConverter{}).RequestFromMetricsFunc) require.NoError(t, err) require.NotNil(t, me) checkWrapSpanForMetricsExporter(t, sr, set.TracerProvider.Tracer("test"), me, nil, 2) @@ -322,7 +322,7 @@ func TestMetricsRequestExporter_WithSpan_ExportError(t *testing.T) { defer otel.SetTracerProvider(nooptrace.NewTracerProvider()) want := errors.New("my_error") - me, err := NewMetricsRequestExporter(context.Background(), set, (&fakeRequestConverter{requestError: want}).requestFromMetricsFunc) + me, err := NewMetricsRequestExporter(context.Background(), set, (&internal.FakeRequestConverter{RequestError: want}).RequestFromMetricsFunc) require.NoError(t, err) require.NotNil(t, me) checkWrapSpanForMetricsExporter(t, sr, set.TracerProvider.Tracer("test"), me, want, 2) @@ -346,7 +346,7 @@ func TestMetricsRequestExporter_WithShutdown(t *testing.T) { shutdown := func(context.Context) error { shutdownCalled = true; return nil } me, err := NewMetricsRequestExporter(context.Background(), exportertest.NewNopSettings(), - (&fakeRequestConverter{}).requestFromMetricsFunc, WithShutdown(shutdown)) + (&internal.FakeRequestConverter{}).RequestFromMetricsFunc, WithShutdown(shutdown)) assert.NotNil(t, me) assert.NoError(t, err) @@ -372,7 +372,7 @@ func TestMetricsRequestExporter_WithShutdown_ReturnError(t *testing.T) { shutdownErr := func(context.Context) error { return want } me, err := NewMetricsRequestExporter(context.Background(), exportertest.NewNopSettings(), - (&fakeRequestConverter{}).requestFromMetricsFunc, WithShutdown(shutdownErr)) + (&internal.FakeRequestConverter{}).RequestFromMetricsFunc, WithShutdown(shutdownErr)) assert.NotNil(t, me) assert.NoError(t, err) @@ -430,7 +430,7 @@ func checkWrapSpanForMetricsExporter(t *testing.T, sr *tracetest.SpanRecorder, t require.Equalf(t, 
fakeMetricsParentSpanName, parentSpan.Name(), "SpanData %v", parentSpan) for _, sd := range gotSpanData[:numRequests] { require.Equalf(t, parentSpan.SpanContext(), sd.Parent(), "Exporter span not a child\nSpanData %v", sd) - checkStatus(t, sd, wantError) + internal.CheckStatus(t, sd, wantError) sentMetricPoints := numMetricPoints var failedToSendMetricPoints int64 diff --git a/exporter/exporterhelper/obsreport_test.go b/exporter/exporterhelper/obsreport_test.go index f8ab9aed1c8..80134bc8a62 100644 --- a/exporter/exporterhelper/obsreport_test.go +++ b/exporter/exporterhelper/obsreport_test.go @@ -12,28 +12,31 @@ import ( "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/exporter/exporterhelper/internal" ) +var exporterID = component.MustNewID("fakeExporter") + func TestExportEnqueueFailure(t *testing.T) { tt, err := componenttest.SetupTelemetry(exporterID) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, tt.Shutdown(context.Background())) }) - obsrep, err := newExporter(obsReportSettings{ - exporterID: exporterID, - exporterCreateSettings: exporter.Settings{ID: exporterID, TelemetrySettings: tt.TelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo()}, + obsrep, err := internal.NewExporter(internal.ObsReportSettings{ + ExporterID: exporterID, + ExporterCreateSettings: exporter.Settings{ID: exporterID, TelemetrySettings: tt.TelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo()}, }) require.NoError(t, err) logRecords := int64(7) - obsrep.recordEnqueueFailure(context.Background(), component.DataTypeLogs, logRecords) + obsrep.RecordEnqueueFailure(context.Background(), component.DataTypeLogs, logRecords) require.NoError(t, tt.CheckExporterEnqueueFailedLogs(logRecords)) spans := int64(12) - obsrep.recordEnqueueFailure(context.Background(), component.DataTypeTraces, spans) + obsrep.RecordEnqueueFailure(context.Background(), component.DataTypeTraces, spans) require.NoError(t, tt.CheckExporterEnqueueFailedTraces(spans)) metricPoints := int64(21) - obsrep.recordEnqueueFailure(context.Background(), component.DataTypeMetrics, metricPoints) + obsrep.RecordEnqueueFailure(context.Background(), component.DataTypeMetrics, metricPoints) require.NoError(t, tt.CheckExporterEnqueueFailedMetrics(metricPoints)) } diff --git a/exporter/exporterhelper/queue_sender.go b/exporter/exporterhelper/queue_sender.go index 58edbcc8732..b81e2036fab 100644 --- a/exporter/exporterhelper/queue_sender.go +++ b/exporter/exporterhelper/queue_sender.go @@ -3,143 +3,20 @@ package exporterhelper // import "go.opentelemetry.io/collector/exporter/exporterhelper" -import ( - "context" - "errors" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/trace" - "go.uber.org/multierr" - "go.uber.org/zap" - - "go.opentelemetry.io/collector/component" - "go.opentelemetry.io/collector/exporter" - "go.opentelemetry.io/collector/exporter/exporterhelper/internal" - "go.opentelemetry.io/collector/exporter/exporterqueue" - "go.opentelemetry.io/collector/exporter/internal/queue" -) - -const defaultQueueSize = 1000 +import "go.opentelemetry.io/collector/exporter/exporterhelper/internal" // Deprecated: [v0.110.0] Use QueueConfig instead. -type QueueSettings = QueueConfig +type QueueSettings = internal.QueueConfig // QueueConfig defines configuration for queueing batches before sending to the consumerSender. 
-type QueueConfig struct { - // Enabled indicates whether to not enqueue batches before sending to the consumerSender. - Enabled bool `mapstructure:"enabled"` - // NumConsumers is the number of consumers from the queue. Defaults to 10. - // If batching is enabled, a combined batch cannot contain more requests than the number of consumers. - // So it's recommended to set higher number of consumers if batching is enabled. - NumConsumers int `mapstructure:"num_consumers"` - // QueueSize is the maximum number of batches allowed in queue at a given time. - QueueSize int `mapstructure:"queue_size"` - // StorageID if not empty, enables the persistent storage and uses the component specified - // as a storage extension for the persistent queue - StorageID *component.ID `mapstructure:"storage"` -} +type QueueConfig = internal.QueueConfig // Deprecated: [v0.110.0] Use NewDefaultQueueConfig instead. func NewDefaultQueueSettings() QueueSettings { - return NewDefaultQueueConfig() + return internal.NewDefaultQueueConfig() } // NewDefaultQueueConfig returns the default config for QueueConfig. func NewDefaultQueueConfig() QueueConfig { - return QueueConfig{ - Enabled: true, - NumConsumers: 10, - // By default, batches are 8192 spans, for a total of up to 8 million spans in the queue - // This can be estimated at 1-4 GB worth of maximum memory usage - // This default is probably still too high, and may be adjusted further down in a future release - QueueSize: defaultQueueSize, - } -} - -// Validate checks if the QueueConfig configuration is valid -func (qCfg *QueueConfig) Validate() error { - if !qCfg.Enabled { - return nil - } - - if qCfg.QueueSize <= 0 { - return errors.New("queue size must be positive") - } - - if qCfg.NumConsumers <= 0 { - return errors.New("number of queue consumers must be positive") - } - - return nil -} - -type queueSender struct { - baseRequestSender - queue exporterqueue.Queue[Request] - numConsumers int - traceAttribute attribute.KeyValue - consumers *queue.Consumers[Request] - - obsrep *obsReport - exporterID component.ID -} - -func newQueueSender(q exporterqueue.Queue[Request], set exporter.Settings, numConsumers int, - exportFailureMessage string, obsrep *obsReport) *queueSender { - qs := &queueSender{ - queue: q, - numConsumers: numConsumers, - traceAttribute: attribute.String(internal.ExporterKey, set.ID.String()), - obsrep: obsrep, - exporterID: set.ID, - } - consumeFunc := func(ctx context.Context, req Request) error { - err := qs.nextSender.send(ctx, req) - if err != nil { - set.Logger.Error("Exporting failed. Dropping data."+exportFailureMessage, - zap.Error(err), zap.Int("dropped_items", req.ItemsCount())) - } - return err - } - qs.consumers = queue.NewQueueConsumers[Request](q, numConsumers, consumeFunc) - return qs -} - -// Start is invoked during service startup. -func (qs *queueSender) Start(ctx context.Context, host component.Host) error { - if err := qs.consumers.Start(ctx, host); err != nil { - return err - } - - dataTypeAttr := attribute.String(internal.DataTypeKey, qs.obsrep.dataType.String()) - return multierr.Append( - qs.obsrep.telemetryBuilder.InitExporterQueueSize(func() int64 { return int64(qs.queue.Size()) }, - metric.WithAttributeSet(attribute.NewSet(qs.traceAttribute, dataTypeAttr))), - qs.obsrep.telemetryBuilder.InitExporterQueueCapacity(func() int64 { return int64(qs.queue.Capacity()) }, - metric.WithAttributeSet(attribute.NewSet(qs.traceAttribute))), - ) -} - -// Shutdown is invoked during service shutdown. 
-func (qs *queueSender) Shutdown(ctx context.Context) error { - // Stop the queue and consumers, this will drain the queue and will call the retry (which is stopped) that will only - // try once every request. - return qs.consumers.Shutdown(ctx) -} - -// send implements the requestSender interface. It puts the request in the queue. -func (qs *queueSender) send(ctx context.Context, req Request) error { - // Prevent cancellation and deadline to propagate to the context stored in the queue. - // The grpc/http based receivers will cancel the request context after this function returns. - c := context.WithoutCancel(ctx) - - span := trace.SpanFromContext(c) - if err := qs.queue.Offer(c, req); err != nil { - span.AddEvent("Failed to enqueue item.", trace.WithAttributes(qs.traceAttribute)) - return err - } - - span.AddEvent("Enqueued item.", trace.WithAttributes(qs.traceAttribute)) - return nil + return internal.NewDefaultQueueConfig() } diff --git a/exporter/exporterhelper/retry_sender.go b/exporter/exporterhelper/retry_sender.go index 0caa10ad72f..5b4476bb1f6 100644 --- a/exporter/exporterhelper/retry_sender.go +++ b/exporter/exporterhelper/retry_sender.go @@ -4,139 +4,12 @@ package exporterhelper // import "go.opentelemetry.io/collector/exporter/exporterhelper" import ( - "context" - "errors" - "fmt" "time" - "github.com/cenkalti/backoff/v4" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" - "go.uber.org/zap" - - "go.opentelemetry.io/collector/config/configretry" - "go.opentelemetry.io/collector/consumer/consumererror" - "go.opentelemetry.io/collector/exporter" "go.opentelemetry.io/collector/exporter/exporterhelper/internal" - "go.opentelemetry.io/collector/exporter/internal/experr" ) -// TODO: Clean this by forcing all exporters to return an internal error type that always include the information about retries. -type throttleRetry struct { - err error - delay time.Duration -} - -func (t throttleRetry) Error() string { - return "Throttle (" + t.delay.String() + "), error: " + t.err.Error() -} - -func (t throttleRetry) Unwrap() error { - return t.err -} - // NewThrottleRetry creates a new throttle retry error. func NewThrottleRetry(err error, delay time.Duration) error { - return throttleRetry{ - err: err, - delay: delay, - } -} - -type retrySender struct { - baseRequestSender - traceAttribute attribute.KeyValue - cfg configretry.BackOffConfig - stopCh chan struct{} - logger *zap.Logger -} - -func newRetrySender(config configretry.BackOffConfig, set exporter.Settings) *retrySender { - return &retrySender{ - traceAttribute: attribute.String(internal.ExporterKey, set.ID.String()), - cfg: config, - stopCh: make(chan struct{}), - logger: set.Logger, - } -} - -func (rs *retrySender) Shutdown(context.Context) error { - close(rs.stopCh) - return nil -} - -// send implements the requestSender interface -func (rs *retrySender) send(ctx context.Context, req Request) error { - // Do not use NewExponentialBackOff since it calls Reset and the code here must - // call Reset after changing the InitialInterval (this saves an unnecessary call to Now). 
- expBackoff := backoff.ExponentialBackOff{ - InitialInterval: rs.cfg.InitialInterval, - RandomizationFactor: rs.cfg.RandomizationFactor, - Multiplier: rs.cfg.Multiplier, - MaxInterval: rs.cfg.MaxInterval, - MaxElapsedTime: rs.cfg.MaxElapsedTime, - Stop: backoff.Stop, - Clock: backoff.SystemClock, - } - expBackoff.Reset() - span := trace.SpanFromContext(ctx) - retryNum := int64(0) - for { - span.AddEvent( - "Sending request.", - trace.WithAttributes(rs.traceAttribute, attribute.Int64("retry_num", retryNum))) - - err := rs.nextSender.send(ctx, req) - if err == nil { - return nil - } - - // Immediately drop data on permanent errors. - if consumererror.IsPermanent(err) { - return fmt.Errorf("not retryable error: %w", err) - } - - req = extractPartialRequest(req, err) - - backoffDelay := expBackoff.NextBackOff() - if backoffDelay == backoff.Stop { - return fmt.Errorf("no more retries left: %w", err) - } - - throttleErr := throttleRetry{} - if errors.As(err, &throttleErr) { - backoffDelay = max(backoffDelay, throttleErr.delay) - } - - backoffDelayStr := backoffDelay.String() - span.AddEvent( - "Exporting failed. Will retry the request after interval.", - trace.WithAttributes( - rs.traceAttribute, - attribute.String("interval", backoffDelayStr), - attribute.String("error", err.Error()))) - rs.logger.Info( - "Exporting failed. Will retry the request after interval.", - zap.Error(err), - zap.String("interval", backoffDelayStr), - ) - retryNum++ - - // back-off, but get interrupted when shutting down or request is cancelled or timed out. - select { - case <-ctx.Done(): - return fmt.Errorf("request is cancelled or timed out %w", err) - case <-rs.stopCh: - return experr.NewShutdownErr(err) - case <-time.After(backoffDelay): - } - } -} - -// max returns the larger of x or y. -func max(x, y time.Duration) time.Duration { - if x < y { - return y - } - return x + return internal.NewThrottleRetry(err, delay) } diff --git a/exporter/exporterhelper/timeout_sender.go b/exporter/exporterhelper/timeout_sender.go index 9e489f54ded..9788397b7d2 100644 --- a/exporter/exporterhelper/timeout_sender.go +++ b/exporter/exporterhelper/timeout_sender.go @@ -4,55 +4,20 @@ package exporterhelper // import "go.opentelemetry.io/collector/exporter/exporterhelper" import ( - "context" - "errors" - "time" + "go.opentelemetry.io/collector/exporter/exporterhelper/internal" ) // Deprecated: [v0.110.0] Use TimeoutConfig instead. type TimeoutSettings = TimeoutConfig -// TimeoutConfig for timeout. The timeout applies to individual attempts to send data to the backend. -type TimeoutConfig struct { - // Timeout is the timeout for every attempt to send data to the backend. - // A zero timeout means no timeout. - Timeout time.Duration `mapstructure:"timeout"` -} - -func (ts *TimeoutConfig) Validate() error { - // Negative timeouts are not acceptable, since all sends will fail. - if ts.Timeout < 0 { - return errors.New("'timeout' must be non-negative") - } - return nil -} +type TimeoutConfig = internal.TimeoutConfig // Deprecated: [v0.110.0] Use NewDefaultTimeoutConfig instead. func NewDefaultTimeoutSettings() TimeoutSettings { - return NewDefaultTimeoutConfig() + return internal.NewDefaultTimeoutConfig() } // NewDefaultTimeoutConfig returns the default config for TimeoutConfig. func NewDefaultTimeoutConfig() TimeoutConfig { - return TimeoutConfig{ - Timeout: 5 * time.Second, - } -} - -// timeoutSender is a requestSender that adds a `timeout` to every request that passes this sender. 
-type timeoutSender struct { - baseRequestSender - cfg TimeoutConfig -} - -func (ts *timeoutSender) send(ctx context.Context, req Request) error { - // TODO: Remove this by avoiding to create the timeout sender if timeout is 0. - if ts.cfg.Timeout == 0 { - return req.Export(ctx) - } - // Intentionally don't overwrite the context inside the request, because in case of retries deadline will not be - // updated because this deadline most likely is before the next one. - tCtx, cancelFunc := context.WithTimeout(ctx, ts.cfg.Timeout) - defer cancelFunc() - return req.Export(tCtx) + return internal.NewDefaultTimeoutConfig() } diff --git a/exporter/exporterhelper/traces.go b/exporter/exporterhelper/traces.go index 075db219d6a..da057a861bf 100644 --- a/exporter/exporterhelper/traces.go +++ b/exporter/exporterhelper/traces.go @@ -13,6 +13,7 @@ import ( "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/consumer/consumererror" "go.opentelemetry.io/collector/exporter" + "go.opentelemetry.io/collector/exporter/exporterhelper/internal" "go.opentelemetry.io/collector/exporter/exporterqueue" "go.opentelemetry.io/collector/exporter/internal/queue" "go.opentelemetry.io/collector/pdata/ptrace" @@ -64,7 +65,7 @@ func (req *tracesRequest) ItemsCount() int { } type traceExporter struct { - *baseExporter + *internal.BaseExporter consumer.Traces } @@ -83,8 +84,8 @@ func NewTracesExporter( return nil, errNilPushTraceData } tracesOpts := []Option{ - withMarshaler(tracesRequestMarshaler), withUnmarshaler(newTraceRequestUnmarshalerFunc(pusher)), - withBatchFuncs(mergeTraces, mergeSplitTraces), + internal.WithMarshaler(tracesRequestMarshaler), internal.WithUnmarshaler(newTraceRequestUnmarshalerFunc(pusher)), + internal.WithBatchFuncs(mergeTraces, mergeSplitTraces), } return NewTracesRequestExporter(ctx, set, requestFromTraces(pusher), append(tracesOpts, options...)...) } @@ -118,7 +119,7 @@ func NewTracesRequestExporter( return nil, errNilTracesConverter } - be, err := newBaseExporter(set, component.DataTypeTraces, newTracesExporterWithObservability, options...) + be, err := internal.NewBaseExporter(set, component.DataTypeTraces, newTracesExporterWithObservability, options...) if err != nil { return nil, err } @@ -131,33 +132,33 @@ func NewTracesRequestExporter( zap.Error(err)) return consumererror.NewPermanent(cErr) } - sErr := be.send(ctx, req) + sErr := be.Send(ctx, req) if errors.Is(sErr, queue.ErrQueueIsFull) { - be.obsrep.recordEnqueueFailure(ctx, component.DataTypeTraces, int64(req.ItemsCount())) + be.Obsrep.RecordEnqueueFailure(ctx, component.DataTypeTraces, int64(req.ItemsCount())) } return sErr - }, be.consumerOptions...) + }, be.ConsumerOptions...) return &traceExporter{ - baseExporter: be, + BaseExporter: be, Traces: tc, }, err } type tracesExporterWithObservability struct { - baseRequestSender - obsrep *obsReport + internal.BaseRequestSender + obsrep *internal.ObsReport } -func newTracesExporterWithObservability(obsrep *obsReport) requestSender { +func newTracesExporterWithObservability(obsrep *internal.ObsReport) internal.RequestSender { return &tracesExporterWithObservability{obsrep: obsrep} } -func (tewo *tracesExporterWithObservability) send(ctx context.Context, req Request) error { - c := tewo.obsrep.startTracesOp(ctx) +func (tewo *tracesExporterWithObservability) Send(ctx context.Context, req Request) error { + c := tewo.obsrep.StartTracesOp(ctx) numTraceSpans := req.ItemsCount() // Forward the data to the next consumer (this pusher is the next). 
- err := tewo.nextSender.send(c, req) - tewo.obsrep.endTracesOp(c, numTraceSpans, err) + err := tewo.NextSender.Send(c, req) + tewo.obsrep.EndTracesOp(c, numTraceSpans, err) return err } diff --git a/exporter/exporterhelper/traces_test.go b/exporter/exporterhelper/traces_test.go index 103b9b76060..3dc277909de 100644 --- a/exporter/exporterhelper/traces_test.go +++ b/exporter/exporterhelper/traces_test.go @@ -61,7 +61,7 @@ func TestTracesExporter_NilLogger(t *testing.T) { } func TestTracesRequestExporter_NilLogger(t *testing.T) { - te, err := NewTracesRequestExporter(context.Background(), exporter.Settings{}, (&fakeRequestConverter{}).requestFromTracesFunc) + te, err := NewTracesRequestExporter(context.Background(), exporter.Settings{}, (&internal.FakeRequestConverter{}).RequestFromTracesFunc) require.Nil(t, te) require.Equal(t, errNilLogger, err) } @@ -93,7 +93,7 @@ func TestTracesExporter_Default(t *testing.T) { func TestTracesRequestExporter_Default(t *testing.T) { td := ptrace.NewTraces() te, err := NewTracesRequestExporter(context.Background(), exportertest.NewNopSettings(), - (&fakeRequestConverter{}).requestFromTracesFunc) + (&internal.FakeRequestConverter{}).RequestFromTracesFunc) assert.NotNil(t, te) assert.NoError(t, err) @@ -115,7 +115,7 @@ func TestTracesExporter_WithCapabilities(t *testing.T) { func TestTracesRequestExporter_WithCapabilities(t *testing.T) { capabilities := consumer.Capabilities{MutatesData: true} te, err := NewTracesRequestExporter(context.Background(), exportertest.NewNopSettings(), - (&fakeRequestConverter{}).requestFromTracesFunc, WithCapabilities(capabilities)) + (&internal.FakeRequestConverter{}).RequestFromTracesFunc, WithCapabilities(capabilities)) assert.NotNil(t, te) assert.NoError(t, err) @@ -137,7 +137,7 @@ func TestTracesRequestExporter_Default_ConvertError(t *testing.T) { td := ptrace.NewTraces() want := errors.New("convert_error") te, err := NewTracesRequestExporter(context.Background(), exportertest.NewNopSettings(), - (&fakeRequestConverter{tracesError: want}).requestFromTracesFunc) + (&internal.FakeRequestConverter{TracesError: want}).RequestFromTracesFunc) require.NoError(t, err) require.NotNil(t, te) require.Equal(t, consumererror.NewPermanent(want), te.ConsumeTraces(context.Background(), td)) @@ -147,7 +147,7 @@ func TestTracesRequestExporter_Default_ExportError(t *testing.T) { td := ptrace.NewTraces() want := errors.New("export_error") te, err := NewTracesRequestExporter(context.Background(), exportertest.NewNopSettings(), - (&fakeRequestConverter{requestError: want}).requestFromTracesFunc) + (&internal.FakeRequestConverter{RequestError: want}).RequestFromTracesFunc) require.NoError(t, err) require.NotNil(t, te) require.Equal(t, want, te.ConsumeTraces(context.Background(), td)) @@ -164,7 +164,7 @@ func TestTracesExporter_WithPersistentQueue(t *testing.T) { te, err := NewTracesExporter(context.Background(), set, &fakeTracesExporterConfig, ts.ConsumeTraces, WithRetry(rCfg), WithQueue(qCfg)) require.NoError(t, err) - host := &mockHost{ext: map[component.ID]component.Component{ + host := &internal.MockHost{Ext: map[component.ID]component.Component{ storageID: queue.NewMockStorageExtension(nil), }} require.NoError(t, te.Start(context.Background(), host)) @@ -211,7 +211,7 @@ func TestTracesRequestExporter_WithRecordMetrics(t *testing.T) { te, err := NewTracesRequestExporter(context.Background(), exporter.Settings{ID: fakeTracesExporterName, TelemetrySettings: tt.TelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo()}, - 
(&fakeRequestConverter{}).requestFromTracesFunc) + (&internal.FakeRequestConverter{}).RequestFromTracesFunc) require.NoError(t, err) require.NotNil(t, te) @@ -239,7 +239,7 @@ func TestTracesRequestExporter_WithRecordMetrics_RequestSenderError(t *testing.T te, err := NewTracesRequestExporter(context.Background(), exporter.Settings{ID: fakeTracesExporterName, TelemetrySettings: tt.TelemetrySettings(), BuildInfo: component.NewDefaultBuildInfo()}, - (&fakeRequestConverter{requestError: want}).requestFromTracesFunc) + (&internal.FakeRequestConverter{RequestError: want}).RequestFromTracesFunc) require.NoError(t, err) require.NotNil(t, te) @@ -292,7 +292,7 @@ func TestTracesRequestExporter_WithSpan(t *testing.T) { otel.SetTracerProvider(set.TracerProvider) defer otel.SetTracerProvider(nooptrace.NewTracerProvider()) - te, err := NewTracesRequestExporter(context.Background(), set, (&fakeRequestConverter{}).requestFromTracesFunc) + te, err := NewTracesRequestExporter(context.Background(), set, (&internal.FakeRequestConverter{}).RequestFromTracesFunc) require.NoError(t, err) require.NotNil(t, te) @@ -322,7 +322,7 @@ func TestTracesRequestExporter_WithSpan_ExportError(t *testing.T) { defer otel.SetTracerProvider(nooptrace.NewTracerProvider()) want := errors.New("export_error") - te, err := NewTracesRequestExporter(context.Background(), set, (&fakeRequestConverter{requestError: want}).requestFromTracesFunc) + te, err := NewTracesRequestExporter(context.Background(), set, (&internal.FakeRequestConverter{RequestError: want}).RequestFromTracesFunc) require.NoError(t, err) require.NotNil(t, te) @@ -347,7 +347,7 @@ func TestTracesRequestExporter_WithShutdown(t *testing.T) { shutdown := func(context.Context) error { shutdownCalled = true; return nil } te, err := NewTracesRequestExporter(context.Background(), exportertest.NewNopSettings(), - (&fakeRequestConverter{}).requestFromTracesFunc, WithShutdown(shutdown)) + (&internal.FakeRequestConverter{}).RequestFromTracesFunc, WithShutdown(shutdown)) assert.NotNil(t, te) assert.NoError(t, err) @@ -373,7 +373,7 @@ func TestTracesRequestExporter_WithShutdown_ReturnError(t *testing.T) { shutdownErr := func(context.Context) error { return want } te, err := NewTracesRequestExporter(context.Background(), exportertest.NewNopSettings(), - (&fakeRequestConverter{}).requestFromTracesFunc, WithShutdown(shutdownErr)) + (&internal.FakeRequestConverter{}).RequestFromTracesFunc, WithShutdown(shutdownErr)) assert.NotNil(t, te) assert.NoError(t, err) @@ -433,7 +433,7 @@ func checkWrapSpanForTracesExporter(t *testing.T, sr *tracetest.SpanRecorder, tr for _, sd := range gotSpanData[:numRequests] { require.Equalf(t, parentSpan.SpanContext(), sd.Parent(), "Exporter span not a child\nSpanData %v", sd) - checkStatus(t, sd, wantError) + internal.CheckStatus(t, sd, wantError) sentSpans := numSpans var failedToSendSpans int64 diff --git a/exporter/exporterprofiles/go.mod b/exporter/exporterprofiles/go.mod index 8d694a04f99..e659701ee31 100644 --- a/exporter/exporterprofiles/go.mod +++ b/exporter/exporterprofiles/go.mod @@ -62,7 +62,3 @@ replace go.opentelemetry.io/collector/consumer => ../../consumer replace go.opentelemetry.io/collector/exporter => ../ replace go.opentelemetry.io/collector/receiver/receiverprofiles => ../../receiver/receiverprofiles - -replace go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles => ../../consumer/consumererror/consumererrorprofiles - -replace go.opentelemetry.io/collector/component/componentprofiles => 
../../component/componentprofiles diff --git a/exporter/go.mod b/exporter/go.mod index d558514b88a..9fa3d2f6cab 100644 --- a/exporter/go.mod +++ b/exporter/go.mod @@ -7,11 +7,9 @@ require ( github.com/google/uuid v1.6.0 github.com/stretchr/testify v1.9.0 go.opentelemetry.io/collector/component v0.109.0 - go.opentelemetry.io/collector/component/componentprofiles v0.109.0 go.opentelemetry.io/collector/config/configretry v1.15.0 go.opentelemetry.io/collector/config/configtelemetry v0.109.0 go.opentelemetry.io/collector/consumer v0.109.0 - go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles v0.0.0-00010101000000-000000000000 go.opentelemetry.io/collector/consumer/consumerprofiles v0.109.0 go.opentelemetry.io/collector/consumer/consumertest v0.109.0 go.opentelemetry.io/collector/exporter/exporterprofiles v0.109.0 @@ -83,12 +81,8 @@ replace go.opentelemetry.io/collector/config/configtelemetry => ../config/config replace go.opentelemetry.io/collector/consumer/consumerprofiles => ../consumer/consumerprofiles -replace go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles => ../consumer/consumererror/consumererrorprofiles - replace go.opentelemetry.io/collector/consumer/consumertest => ../consumer/consumertest replace go.opentelemetry.io/collector/receiver/receiverprofiles => ../receiver/receiverprofiles replace go.opentelemetry.io/collector/exporter/exporterprofiles => ./exporterprofiles - -replace go.opentelemetry.io/collector/component/componentprofiles => ../component/componentprofiles diff --git a/exporter/loggingexporter/go.mod b/exporter/loggingexporter/go.mod index e72370a5b8e..fd226a6073e 100644 --- a/exporter/loggingexporter/go.mod +++ b/exporter/loggingexporter/go.mod @@ -39,10 +39,8 @@ require ( github.com/prometheus/client_model v0.6.1 // indirect github.com/prometheus/common v0.59.1 // indirect github.com/prometheus/procfs v0.15.1 // indirect - go.opentelemetry.io/collector/component/componentprofiles v0.109.0 // indirect go.opentelemetry.io/collector/config/configretry v1.15.0 // indirect go.opentelemetry.io/collector/consumer v0.109.0 // indirect - go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles v0.0.0-00010101000000-000000000000 // indirect go.opentelemetry.io/collector/consumer/consumerprofiles v0.109.0 // indirect go.opentelemetry.io/collector/consumer/consumertest v0.109.0 // indirect go.opentelemetry.io/collector/exporter/exporterprofiles v0.109.0 // indirect @@ -103,7 +101,3 @@ replace go.opentelemetry.io/collector/consumer/consumertest => ../../consumer/co replace go.opentelemetry.io/collector/receiver/receiverprofiles => ../../receiver/receiverprofiles replace go.opentelemetry.io/collector/exporter/exporterprofiles => ../exporterprofiles - -replace go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles => ../../consumer/consumererror/consumererrorprofiles - -replace go.opentelemetry.io/collector/component/componentprofiles => ../../component/componentprofiles diff --git a/exporter/nopexporter/go.mod b/exporter/nopexporter/go.mod index f28938faa5f..f45e33b3c31 100644 --- a/exporter/nopexporter/go.mod +++ b/exporter/nopexporter/go.mod @@ -91,7 +91,3 @@ replace go.opentelemetry.io/collector/consumer/consumertest => ../../consumer/co replace go.opentelemetry.io/collector/receiver/receiverprofiles => ../../receiver/receiverprofiles replace go.opentelemetry.io/collector/exporter/exporterprofiles => ../exporterprofiles - -replace go.opentelemetry.io/collector/component/componentprofiles => 
../../component/componentprofiles - -replace go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles => ../../consumer/consumererror/consumererrorprofiles diff --git a/exporter/otlpexporter/factory.go b/exporter/otlpexporter/factory.go index 43e933965cf..84891ceb153 100644 --- a/exporter/otlpexporter/factory.go +++ b/exporter/otlpexporter/factory.go @@ -15,6 +15,7 @@ import ( "go.opentelemetry.io/collector/exporter" "go.opentelemetry.io/collector/exporter/exporterbatcher" "go.opentelemetry.io/collector/exporter/exporterhelper" + "go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelperprofiles" "go.opentelemetry.io/collector/exporter/exporterprofiles" "go.opentelemetry.io/collector/exporter/otlpexporter/internal/metadata" ) @@ -114,10 +115,10 @@ func createProfilesExporter( ) (exporterprofiles.Profiles, error) { oce := newExporter(cfg, set) oCfg := cfg.(*Config) - return exporterhelper.NewProfilesExporter(ctx, set, cfg, + return exporterhelperprofiles.NewProfilesExporter(ctx, set, cfg, oce.pushProfiles, exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}), - exporterhelper.WithTimeout(oCfg.TimeoutSettings), + exporterhelper.WithTimeout(oCfg.TimeoutConfig), exporterhelper.WithRetry(oCfg.RetryConfig), exporterhelper.WithQueue(oCfg.QueueConfig), exporterhelper.WithBatcher(oCfg.BatcherConfig), diff --git a/exporter/otlpexporter/factory_test.go b/exporter/otlpexporter/factory_test.go index 68af457102e..f533d2c6558 100644 --- a/exporter/otlpexporter/factory_test.go +++ b/exporter/otlpexporter/factory_test.go @@ -332,7 +332,7 @@ func TestCreateProfilesExporter(t *testing.T) { if err != nil { // Since the endpoint of OTLP exporter doesn't actually exist, // exporter may already stop because it cannot connect. 
- assert.Equal(t, err.Error(), "rpc error: code = Canceled desc = grpc: the client connection is closing") + assert.Equal(t, "rpc error: code = Canceled desc = grpc: the client connection is closing", err.Error()) } }) } diff --git a/exporter/otlpexporter/go.mod b/exporter/otlpexporter/go.mod index 27f61ed1656..4dcfc0ae86e 100644 --- a/exporter/otlpexporter/go.mod +++ b/exporter/otlpexporter/go.mod @@ -15,6 +15,7 @@ require ( go.opentelemetry.io/collector/confmap v1.15.0 go.opentelemetry.io/collector/consumer v0.109.0 go.opentelemetry.io/collector/exporter v0.109.0 + go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelperprofiles v0.0.0-00010101000000-000000000000 go.opentelemetry.io/collector/exporter/exporterprofiles v0.109.0 go.opentelemetry.io/collector/pdata v1.15.0 go.opentelemetry.io/collector/pdata/pprofile v0.109.0 @@ -145,6 +146,8 @@ replace go.opentelemetry.io/collector/receiver/receiverprofiles => ../../receive replace go.opentelemetry.io/collector/exporter/exporterprofiles => ../exporterprofiles +replace go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelperprofiles => ../exporterhelper/exporterhelperprofiles + replace go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles => ../../consumer/consumererror/consumererrorprofiles replace go.opentelemetry.io/collector/component/componentprofiles => ../../component/componentprofiles diff --git a/exporter/otlpexporter/otlp_test.go b/exporter/otlpexporter/otlp_test.go index d38de21e126..91d8c6f7415 100644 --- a/exporter/otlpexporter/otlp_test.go +++ b/exporter/otlpexporter/otlp_test.go @@ -938,7 +938,7 @@ func TestSendProfiles(t *testing.T) { assert.EqualValues(t, td, rcv.getLastRequest()) md := rcv.getMetadata() - require.EqualValues(t, md.Get("header"), expectedHeader) + require.EqualValues(t, expectedHeader, md.Get("header")) require.Len(t, md.Get("User-Agent"), 1) require.Contains(t, md.Get("User-Agent")[0], "Collector/1.2.3test") diff --git a/exporter/otlphttpexporter/factory.go b/exporter/otlphttpexporter/factory.go index d19acc80e2f..ea9fa969b43 100644 --- a/exporter/otlphttpexporter/factory.go +++ b/exporter/otlphttpexporter/factory.go @@ -18,6 +18,7 @@ import ( "go.opentelemetry.io/collector/consumer" "go.opentelemetry.io/collector/exporter" "go.opentelemetry.io/collector/exporter/exporterhelper" + "go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelperprofiles" "go.opentelemetry.io/collector/exporter/exporterprofiles" "go.opentelemetry.io/collector/exporter/otlphttpexporter/internal/metadata" ) @@ -163,12 +164,12 @@ func createProfilesExporter( return nil, err } - return exporterhelper.NewProfilesExporter(ctx, set, cfg, + return exporterhelperprofiles.NewProfilesExporter(ctx, set, cfg, oce.pushProfiles, exporterhelper.WithStart(oce.start), exporterhelper.WithCapabilities(consumer.Capabilities{MutatesData: false}), // explicitly disable since we rely on http.Client timeout logic. 
- exporterhelper.WithTimeout(exporterhelper.TimeoutSettings{Timeout: 0}), + exporterhelper.WithTimeout(exporterhelper.TimeoutConfig{Timeout: 0}), exporterhelper.WithRetry(oCfg.RetryConfig), exporterhelper.WithQueue(oCfg.QueueConfig)) } diff --git a/exporter/otlphttpexporter/go.mod b/exporter/otlphttpexporter/go.mod index ecf43e4c77e..64c4bb166b2 100644 --- a/exporter/otlphttpexporter/go.mod +++ b/exporter/otlphttpexporter/go.mod @@ -14,6 +14,7 @@ require ( go.opentelemetry.io/collector/confmap v1.15.0 go.opentelemetry.io/collector/consumer v0.109.0 go.opentelemetry.io/collector/exporter v0.109.0 + go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelperprofiles v0.0.0-00010101000000-000000000000 go.opentelemetry.io/collector/exporter/exporterprofiles v0.109.0 go.opentelemetry.io/collector/pdata v1.15.0 go.opentelemetry.io/collector/pdata/pprofile v0.109.0 @@ -142,6 +143,8 @@ replace go.opentelemetry.io/collector/receiver/receiverprofiles => ../../receive replace go.opentelemetry.io/collector/exporter/exporterprofiles => ../exporterprofiles +replace go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelperprofiles => ../exporterhelper/exporterhelperprofiles + replace go.opentelemetry.io/collector/component/componentprofiles => ../../component/componentprofiles replace go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles => ../../consumer/consumererror/consumererrorprofiles diff --git a/internal/e2e/go.mod b/internal/e2e/go.mod index 097e2383acd..bfe82b3fdc4 100644 --- a/internal/e2e/go.mod +++ b/internal/e2e/go.mod @@ -79,6 +79,7 @@ require ( go.opentelemetry.io/collector/connector/connectorprofiles v0.109.0 // indirect go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles v0.0.0-00010101000000-000000000000 // indirect go.opentelemetry.io/collector/consumer/consumerprofiles v0.109.0 // indirect + go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelperprofiles v0.0.0-00010101000000-000000000000 // indirect go.opentelemetry.io/collector/exporter/exporterprofiles v0.109.0 // indirect go.opentelemetry.io/collector/extension/auth v0.109.0 // indirect go.opentelemetry.io/collector/extension/experimental/storage v0.109.0 // indirect @@ -208,4 +209,6 @@ replace go.opentelemetry.io/collector/connector/connectorprofiles => ../../conne replace go.opentelemetry.io/collector/exporter/exporterprofiles => ../../exporter/exporterprofiles +replace go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelperprofiles => ../../exporter/exporterhelper/exporterhelperprofiles + replace go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles => ../../consumer/consumererror/consumererrorprofiles diff --git a/otelcol/go.mod b/otelcol/go.mod index 4c652584169..9ce0b07ecb8 100644 --- a/otelcol/go.mod +++ b/otelcol/go.mod @@ -178,5 +178,3 @@ replace go.opentelemetry.io/collector/processor/processorprofiles => ../processo replace go.opentelemetry.io/collector/connector/connectorprofiles => ../connector/connectorprofiles replace go.opentelemetry.io/collector/exporter/exporterprofiles => ../exporter/exporterprofiles - -replace go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles => ../consumer/consumererror/consumererrorprofiles diff --git a/otelcol/otelcoltest/go.mod b/otelcol/otelcoltest/go.mod index 9a2779b4427..bca3cb4814c 100644 --- a/otelcol/otelcoltest/go.mod +++ b/otelcol/otelcoltest/go.mod @@ -193,5 +193,3 @@ replace go.opentelemetry.io/collector/processor/processorprofiles => 
../../proce replace go.opentelemetry.io/collector/connector/connectorprofiles => ../../connector/connectorprofiles replace go.opentelemetry.io/collector/exporter/exporterprofiles => ../../exporter/exporterprofiles - -replace go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles => ../../consumer/consumererror/consumererrorprofiles diff --git a/service/go.mod b/service/go.mod index 49dddd51c79..14591fba060 100644 --- a/service/go.mod +++ b/service/go.mod @@ -186,5 +186,3 @@ replace go.opentelemetry.io/collector/processor/processorprofiles => ../processo replace go.opentelemetry.io/collector/connector/connectorprofiles => ../connector/connectorprofiles replace go.opentelemetry.io/collector/exporter/exporterprofiles => ../exporter/exporterprofiles - -replace go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles => ../consumer/consumererror/consumererrorprofiles diff --git a/versions.yaml b/versions.yaml index 124aec065ce..f0befe2a353 100644 --- a/versions.yaml +++ b/versions.yaml @@ -45,6 +45,7 @@ module-sets: - go.opentelemetry.io/collector/exporter - go.opentelemetry.io/collector/exporter/debugexporter - go.opentelemetry.io/collector/exporter/exporterprofiles + - go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelperprofiles - go.opentelemetry.io/collector/exporter/loggingexporter - go.opentelemetry.io/collector/exporter/nopexporter - go.opentelemetry.io/collector/exporter/otlpexporter From d003c4a6283a80a507cc273fdaa9a64c99fe4a91 Mon Sep 17 00:00:00 2001 From: dmathieu <42@dmathieu.com> Date: Thu, 12 Sep 2024 15:48:14 +0200 Subject: [PATCH 15/18] remove profiles from exporterhelper metadata --- exporter/exporterhelper/metadata.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/exporter/exporterhelper/metadata.yaml b/exporter/exporterhelper/metadata.yaml index 5dad11fb99b..9156cb95c80 100644 --- a/exporter/exporterhelper/metadata.yaml +++ b/exporter/exporterhelper/metadata.yaml @@ -5,7 +5,7 @@ status: class: exporter not_component: true stability: - beta: [traces, metrics, logs, profiles] + beta: [traces, metrics, logs] distributions: [core, contrib] telemetry: From ada7fe711ebdfb9f2e61700618a6d022ab67caf4 Mon Sep 17 00:00:00 2001 From: dmathieu <42@dmathieu.com> Date: Wed, 18 Sep 2024 14:07:15 +0200 Subject: [PATCH 16/18] use released consumer --- consumer/consumererror/consumererrorprofiles/go.mod | 4 ++-- consumer/consumererror/consumererrorprofiles/go.sum | 4 ++-- exporter/exporterhelper/exporterhelperprofiles/go.mod | 2 +- exporter/exporterhelper/exporterhelperprofiles/go.sum | 4 ++-- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/consumer/consumererror/consumererrorprofiles/go.mod b/consumer/consumererror/consumererrorprofiles/go.mod index dff43fadf63..f2f81d162cc 100644 --- a/consumer/consumererror/consumererrorprofiles/go.mod +++ b/consumer/consumererror/consumererrorprofiles/go.mod @@ -4,7 +4,7 @@ go 1.22.0 require ( github.com/stretchr/testify v1.9.0 - go.opentelemetry.io/collector/consumer v0.0.0-00010101000000-000000000000 + go.opentelemetry.io/collector/consumer v0.109.0 go.opentelemetry.io/collector/pdata/pprofile v0.109.0 go.opentelemetry.io/collector/pdata/testdata v0.109.0 ) @@ -22,7 +22,7 @@ require ( golang.org/x/sys v0.21.0 // indirect golang.org/x/text v0.16.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117 // indirect - google.golang.org/grpc v1.66.0 // indirect + google.golang.org/grpc v1.66.2 // indirect google.golang.org/protobuf 
v1.34.2 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/consumer/consumererror/consumererrorprofiles/go.sum b/consumer/consumererror/consumererrorprofiles/go.sum index 03ca0e47eea..480aed29046 100644 --- a/consumer/consumererror/consumererrorprofiles/go.sum +++ b/consumer/consumererror/consumererrorprofiles/go.sum @@ -66,8 +66,8 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117 h1:1GBuWVLM/KMVUv1t1En5Gs+gFZCNd360GGb4sSxtrhU= google.golang.org/genproto/googleapis/rpc v0.0.0-20240604185151-ef581f913117/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= -google.golang.org/grpc v1.66.0 h1:DibZuoBznOxbDQxRINckZcUvnCEvrW9pcWIE2yF9r1c= -google.golang.org/grpc v1.66.0/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= +google.golang.org/grpc v1.66.2 h1:3QdXkuq3Bkh7w+ywLdLvM56cmGvQHUMZpiCzt6Rqaoo= +google.golang.org/grpc v1.66.2/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/exporter/exporterhelper/exporterhelperprofiles/go.mod b/exporter/exporterhelper/exporterhelperprofiles/go.mod index d998a4967f5..1984be3abdd 100644 --- a/exporter/exporterhelper/exporterhelperprofiles/go.mod +++ b/exporter/exporterhelper/exporterhelperprofiles/go.mod @@ -45,7 +45,7 @@ require ( golang.org/x/sys v0.25.0 // indirect golang.org/x/text v0.17.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd // indirect - google.golang.org/grpc v1.66.0 // indirect + google.golang.org/grpc v1.66.2 // indirect google.golang.org/protobuf v1.34.2 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/exporter/exporterhelper/exporterhelperprofiles/go.sum b/exporter/exporterhelper/exporterhelperprofiles/go.sum index e1a8c130283..06005d39a38 100644 --- a/exporter/exporterhelper/exporterhelperprofiles/go.sum +++ b/exporter/exporterhelper/exporterhelperprofiles/go.sum @@ -87,8 +87,8 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd h1:6TEm2ZxXoQmFWFlt1vNxvVOa1Q0dXFQD1m/rYjXmS0E= google.golang.org/genproto/googleapis/rpc v0.0.0-20240822170219-fc7c04adadcd/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/grpc v1.66.0 h1:DibZuoBznOxbDQxRINckZcUvnCEvrW9pcWIE2yF9r1c= -google.golang.org/grpc v1.66.0/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= +google.golang.org/grpc v1.66.2 h1:3QdXkuq3Bkh7w+ywLdLvM56cmGvQHUMZpiCzt6Rqaoo= +google.golang.org/grpc v1.66.2/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= From 9a8b8a7dd7737ca6775fd858d60e044cf134a1b0 Mon Sep 17 00:00:00 2001 From: dmathieu <42@dmathieu.com> Date: Wed, 18 Sep 2024 
14:08:12 +0200 Subject: [PATCH 17/18] add changelog entry for otlphttpexporter --- .chloggen/otlpexporter-profiles.yaml | 2 +- .chloggen/otlphttpexporter-profiles.yaml | 25 ++++++++++++++++++++++++ 2 files changed, 26 insertions(+), 1 deletion(-) create mode 100644 .chloggen/otlphttpexporter-profiles.yaml diff --git a/.chloggen/otlpexporter-profiles.yaml b/.chloggen/otlpexporter-profiles.yaml index ae21e351e91..072fd13feae 100644 --- a/.chloggen/otlpexporter-profiles.yaml +++ b/.chloggen/otlpexporter-profiles.yaml @@ -7,7 +7,7 @@ change_type: enhancement component: otlpexporter # A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). -note: Support profiles in the OTLP exporters +note: Support profiles in the OTLP exporter # One or more tracking issues or pull requests related to the change issues: [11131] diff --git a/.chloggen/otlphttpexporter-profiles.yaml b/.chloggen/otlphttpexporter-profiles.yaml new file mode 100644 index 00000000000..e7974bc58f5 --- /dev/null +++ b/.chloggen/otlphttpexporter-profiles.yaml @@ -0,0 +1,25 @@ +# Use this changelog template to create an entry for release notes. + +# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix' +change_type: enhancement + +# The name of the component, or a single word describing the area of concern, (e.g. otlpreceiver) +component: otlphttpexporter + +# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`). +note: Support profiles in the OTLP HTTP exporter + +# One or more tracking issues or pull requests related to the change +issues: [11131] + +# (Optional) One or more lines of additional information to render under the primary note. +# These lines will be padded with 2 spaces and then inserted directly into the document. +# Use pipe (|) for multiline entries. +subtext: + +# Optional: The change log or logs in which this entry should be included. +# e.g. '[user]' or '[user, api]' +# Include 'user' if the change is relevant to end users. +# Include 'api' if there is a change to a library API. 
+# Default: '[user]' +change_logs: [api] From 8549655eadf80e845d2577437724776f4d93a9d0 Mon Sep 17 00:00:00 2001 From: dmathieu <42@dmathieu.com> Date: Wed, 18 Sep 2024 14:24:53 +0200 Subject: [PATCH 18/18] fix wrong assert/require merge --- .../exporterhelper/internal/batch_sender_test.go | 16 ++++++++-------- exporter/otlphttpexporter/otlp_test.go | 4 ++-- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/exporter/exporterhelper/internal/batch_sender_test.go b/exporter/exporterhelper/internal/batch_sender_test.go index eca295b7412..5976a312139 100644 --- a/exporter/exporterhelper/internal/batch_sender_test.go +++ b/exporter/exporterhelper/internal/batch_sender_test.go @@ -550,8 +550,8 @@ func TestBatchSender_ShutdownDeadlock(t *testing.T) { sink := newFakeRequestSink() // Send 2 concurrent requests - go func() { require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 4, sink: sink})) }() - go func() { require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 4, sink: sink})) }() + go func() { assert.NoError(t, be.Send(context.Background(), &fakeRequest{items: 4, sink: sink})) }() + go func() { assert.NoError(t, be.Send(context.Background(), &fakeRequest{items: 4, sink: sink})) }() // Wait for the requests to enter the merge function <-waitMerge @@ -591,7 +591,7 @@ func TestBatchSenderWithTimeout(t *testing.T) { for i := 0; i < 3; i++ { wg.Add(1) go func() { - require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 4, sink: sink})) + assert.NoError(t, be.Send(context.Background(), &fakeRequest{items: 4, sink: sink})) wg.Done() }() } @@ -645,11 +645,11 @@ func TestBatchSenderTimerResetNoConflict(t *testing.T) { // Send 2 concurrent requests that should be merged in one batch in the same interval as the flush timer go func() { - require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 4, sink: sink})) + assert.NoError(t, be.Send(context.Background(), &fakeRequest{items: 4, sink: sink})) }() time.Sleep(30 * time.Millisecond) go func() { - require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 4, sink: sink})) + assert.NoError(t, be.Send(context.Background(), &fakeRequest{items: 4, sink: sink})) }() // The batch should be sent either with the flush interval or by reaching the minimum items size with no conflict @@ -677,10 +677,10 @@ func TestBatchSenderTimerFlush(t *testing.T) { // Send 2 concurrent requests that should be merged in one batch and sent immediately go func() { - require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 4, sink: sink})) + assert.NoError(t, be.Send(context.Background(), &fakeRequest{items: 4, sink: sink})) }() go func() { - require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 4, sink: sink})) + assert.NoError(t, be.Send(context.Background(), &fakeRequest{items: 4, sink: sink})) }() assert.EventuallyWithT(t, func(c *assert.CollectT) { assert.LessOrEqual(c, uint64(1), sink.requestsCount.Load()) @@ -689,7 +689,7 @@ func TestBatchSenderTimerFlush(t *testing.T) { // Send another request that should be flushed after 100ms instead of 50ms since last flush go func() { - require.NoError(t, be.Send(context.Background(), &fakeRequest{items: 4, sink: sink})) + assert.NoError(t, be.Send(context.Background(), &fakeRequest{items: 4, sink: sink})) }() // Confirm that it is not flushed in 50ms diff --git a/exporter/otlphttpexporter/otlp_test.go b/exporter/otlphttpexporter/otlp_test.go index 7d9104b402f..9ee7522220e 100644 --- a/exporter/otlphttpexporter/otlp_test.go +++ 
b/exporter/otlphttpexporter/otlp_test.go @@ -952,10 +952,10 @@ func TestPartialSuccess_profiles(t *testing.T) { partial.SetErrorMessage("hello") partial.SetRejectedProfiles(1) bytes, err := response.MarshalProto() - require.NoError(t, err) + assert.NoError(t, err) writer.Header().Set("Content-Type", "application/x-protobuf") _, err = writer.Write(bytes) - require.NoError(t, err) + assert.NoError(t, err) }) defer srv.Close()