Allow GC to collect unneeded slice elements
ash2k committed Oct 9, 2024
1 parent 8e9baf2 commit 13656a8
Showing 6 changed files with 7 additions and 5 deletions.
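Before the per-file hunks, a minimal standalone sketch (not part of the commit; the span type and names below are invented) of the pattern every change applies: re-slicing a slice to s[:0] keeps its old elements reachable through the backing array, so clear(s), available since Go 1.21, is called first to drop those references and let the GC reclaim whatever they point to.

package main

import "fmt"

type span struct{ name string }

// resetForReuse empties batch while keeping its capacity for the next round.
// Without the clear call, the backing array would still hold the old *span
// pointers after the re-slice, keeping those spans alive.
func resetForReuse(batch []*span) []*span {
	clear(batch)     // zero every element, dropping the references (Go 1.21+)
	return batch[:0] // length 0, capacity preserved
}

func main() {
	batch := []*span{{name: "a"}, {name: "b"}}
	batch = resetForReuse(batch)
	fmt.Println(len(batch), cap(batch)) // 0 2
}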
2 changes: 1 addition & 1 deletion sdk/log/record.go
@@ -234,7 +234,7 @@ func (r *Record) AddAttributes(attrs ...log.KeyValue) {
 		//
 		// Do not use head(attrs, r.attributeCountLimit - n) here. If
 		// (r.attributeCountLimit - n) <= 0 attrs needs to be emptied.
-		last := max(0, (r.attributeCountLimit - n))
+		last := max(0, r.attributeCountLimit-n)
 		r.addDropped(len(attrs) - last)
 		attrs = attrs[:last]
 	}
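For context, a tiny illustrative walk-through (values invented; limit and n stand in for r.attributeCountLimit and the record's current attribute count) of the clamp in the hunk above: once the record already holds at least as many attributes as the limit, limit-n is zero or negative, so last clamps to 0, every incoming attribute counts as dropped, and attrs is emptied.

package main

import "fmt"

func main() {
	limit, n := 5, 7
	attrs := []string{"a", "b", "c"} // incoming attributes

	last := max(0, limit-n)      // limit-n = -2, so last clamps to 0
	dropped := len(attrs) - last // all 3 incoming attributes are dropped
	attrs = attrs[:last]         // attrs is emptied, as the comment requires

	fmt.Println(last, dropped, len(attrs)) // 0 3 0
}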
1 change: 1 addition & 0 deletions sdk/metric/internal/aggregate/drop.go
@@ -20,5 +20,6 @@ func (r *dropRes[N]) Offer(context.Context, N, []attribute.KeyValue) {}
 
 // Collect resets dest. No exemplars will ever be returned.
 func (r *dropRes[N]) Collect(dest *[]exemplar.Exemplar) {
+	clear(*dest) // Erase elements to let GC collect objects
 	*dest = (*dest)[:0]
 }
1 change: 1 addition & 0 deletions sdk/metric/internal/aggregate/exemplar.go
@@ -17,6 +17,7 @@ var exemplarPool = sync.Pool{
 func collectExemplars[N int64 | float64](out *[]metricdata.Exemplar[N], f func(*[]exemplar.Exemplar)) {
 	dest := exemplarPool.Get().(*[]exemplar.Exemplar)
 	defer func() {
+		clear(*dest) // Erase elements to let GC collect objects
 		*dest = (*dest)[:0]
 		exemplarPool.Put(dest)
 	}()
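A self-contained sketch (the exemplarLike type is invented, not the SDK's) of the pool-reset pattern in the hunk above: the scratch slice is cleared before it goes back into the sync.Pool, so a pooled backing array does not pin the exemplars gathered on the previous collection.

package main

import (
	"fmt"
	"sync"
)

type exemplarLike struct{ attrs []string }

var pool = sync.Pool{
	New: func() any { return new([]exemplarLike) },
}

// withScratch lends a pooled slice to f, then clears it before returning it
// to the pool so its old elements can be garbage collected.
func withScratch(f func(*[]exemplarLike)) {
	dest := pool.Get().(*[]exemplarLike)
	defer func() {
		clear(*dest)        // drop references held by the pooled backing array
		*dest = (*dest)[:0] // keep capacity for the next caller
		pool.Put(dest)
	}()
	f(dest)
}

func main() {
	withScratch(func(dst *[]exemplarLike) {
		*dst = append(*dst, exemplarLike{attrs: []string{"k=v"}})
		fmt.Println(len(*dst)) // 1
	})
}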
2 changes: 2 additions & 0 deletions sdk/metric/pipeline.go
@@ -113,6 +113,7 @@ func (p *pipeline) produce(ctx context.Context, rm *metricdata.ResourceMetrics)
 		}
 		if err := ctx.Err(); err != nil {
 			rm.Resource = nil
+			clear(rm.ScopeMetrics) // Erase elements to let GC collect objects
 			rm.ScopeMetrics = rm.ScopeMetrics[:0]
 			return err
 		}
@@ -126,6 +127,7 @@ func (p *pipeline) produce(ctx context.Context, rm *metricdata.ResourceMetrics)
 	if err := ctx.Err(); err != nil {
 		// This means the context expired before we finished running callbacks.
 		rm.Resource = nil
+		clear(rm.ScopeMetrics) // Erase elements to let GC collect objects
 		rm.ScopeMetrics = rm.ScopeMetrics[:0]
 		return err
 	}
1 change: 1 addition & 0 deletions sdk/trace/batch_span_processor.go
@@ -280,6 +280,7 @@ func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error {
 		//
 		// It is up to the exporter to implement any type of retry logic if a batch is failing
 		// to be exported, since it is specific to the protocol and backend being sent to.
+		clear(bsp.batch) // Erase elements to let GC collect objects
 		bsp.batch = bsp.batch[:0]
 
 		if err != nil {
5 changes: 1 addition & 4 deletions sdk/trace/span.go
@@ -623,10 +623,7 @@ func (s *recordingSpan) dedupeAttrsFromRecord(record map[attribute.Key]int) {
 			record[a.Key] = len(unique) - 1
 		}
 	}
-	// s.attributes have element types of attribute.KeyValue. These types are
-	// not pointers and they themselves do not contain pointer fields,
-	// therefore the duplicate values do not need to be zeroed for them to be
-	// garbage collected.
+	clear(s.attributes[len(unique):]) // Erase unneeded elements to let GC collect objects
 	s.attributes = unique
 }

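The removed comment argued that attribute.KeyValue elements need no zeroing; the new code clears the duplicate tail of the slice regardless. A standalone sketch (the kv type is invented, not the SDK's) of why clearing value-typed elements can still release memory when those values embed strings or slices:

package main

import "fmt"

type kv struct {
	key string
	val []byte // a value type can still reference heap memory
}

func main() {
	attrs := make([]kv, 4)
	for i := range attrs {
		attrs[i] = kv{key: "k", val: make([]byte, 1<<20)}
	}

	unique := attrs[:2]
	// Zero the duplicate tail so the 1 MiB buffers held by attrs[2:] become
	// unreachable, even though kv is a plain value type rather than a pointer.
	clear(attrs[len(unique):])
	attrs = unique

	fmt.Println(len(attrs), cap(attrs)) // 2 4
}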
