memorylimiter.go
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package memorylimiterprocessor // import "go.opentelemetry.io/collector/processor/memorylimiterprocessor"

import (
	"context"
	"errors"
	"fmt"
	"runtime"
	"sync"
	"sync/atomic"
	"time"

	"go.uber.org/zap"

	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/internal/iruntime"
	"go.opentelemetry.io/collector/pdata/plog"
	"go.opentelemetry.io/collector/pdata/pmetric"
	"go.opentelemetry.io/collector/pdata/ptrace"
	"go.opentelemetry.io/collector/processor"
	"go.opentelemetry.io/collector/processor/processorhelper"
)

const (
	mibBytes = 1024 * 1024
)

var (
	// errDataRefused will be returned to callers of ConsumeTraceData to indicate
	// that data is being refused due to high memory usage.
	errDataRefused = errors.New("data refused due to high memory usage")

	errShutdownNotStarted = errors.New("no existing monitoring routine is running")
)

// make it overridable by tests
var getMemoryFn = iruntime.TotalMemory

type memoryLimiter struct {
	usageChecker memUsageChecker

	memCheckWait time.Duration
	ballastSize  uint64

	// mustRefuse is used to indicate when data should be refused.
	mustRefuse *atomic.Bool

	ticker *time.Ticker

	lastGCDone time.Time

	// The function to read the mem values is set as a reference to help with
	// testing different values.
	readMemStatsFn func(m *runtime.MemStats)

	// Fields used for logging.
	logger                 *zap.Logger
	configMismatchedLogged bool

	obsrep *processorhelper.ObsReport

	refCounterLock sync.Mutex
	refCounter     int
}

// Minimum interval between forced GC when in soft limited mode. We don't want to
// do GCs too frequently since it is a CPU-heavy operation.
const minGCIntervalWhenSoftLimited = 10 * time.Second

// newMemoryLimiter returns a new memorylimiter processor.
func newMemoryLimiter(set processor.CreateSettings, cfg *Config) (*memoryLimiter, error) {
	logger := set.Logger
	usageChecker, err := getMemUsageChecker(cfg, logger)
	if err != nil {
		return nil, err
	}

	logger.Info("Memory limiter configured",
		zap.Uint64("limit_mib", usageChecker.memAllocLimit/mibBytes),
		zap.Uint64("spike_limit_mib", usageChecker.memSpikeLimit/mibBytes),
		zap.Duration("check_interval", cfg.CheckInterval))

	obsrep, err := processorhelper.NewObsReport(processorhelper.ObsReportSettings{
		ProcessorID:             set.ID,
		ProcessorCreateSettings: set,
	})
	if err != nil {
		return nil, err
	}

	ml := &memoryLimiter{
		usageChecker:   *usageChecker,
		memCheckWait:   cfg.CheckInterval,
		ticker:         time.NewTicker(cfg.CheckInterval),
		readMemStatsFn: runtime.ReadMemStats,
		logger:         logger,
		mustRefuse:     &atomic.Bool{},
		obsrep:         obsrep,
	}

	return ml, nil
}
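// An illustrative collector configuration driving this processor (not part of
// this file; the key names below follow the memorylimiter documentation and are
// shown only as a hedged example):
//
//	processors:
//	  memory_limiter:
//	    check_interval: 1s
//	    limit_mib: 4000
//	    spike_limit_mib: 800
//
// With those values the hard limit is 4000 MiB and the soft limit is
// 4000 - 800 = 3200 MiB (see memUsageChecker below).
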
func getMemUsageChecker(cfg *Config, logger *zap.Logger) (*memUsageChecker, error) {
	memAllocLimit := uint64(cfg.MemoryLimitMiB) * mibBytes
	memSpikeLimit := uint64(cfg.MemorySpikeLimitMiB) * mibBytes
	if cfg.MemoryLimitMiB != 0 {
		return newFixedMemUsageChecker(memAllocLimit, memSpikeLimit), nil
	}

	totalMemory, err := getMemoryFn()
	if err != nil {
		return nil, fmt.Errorf("failed to get total memory, use fixed memory settings (limit_mib): %w", err)
	}

	logger.Info("Using percentage memory limiter",
		zap.Uint64("total_memory_mib", totalMemory/mibBytes),
		zap.Uint32("limit_percentage", cfg.MemoryLimitPercentage),
		zap.Uint32("spike_limit_percentage", cfg.MemorySpikePercentage))

	return newPercentageMemUsageChecker(totalMemory, uint64(cfg.MemoryLimitPercentage),
		uint64(cfg.MemorySpikePercentage)), nil
}
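// start scans the host's extensions for one that exposes GetBallastSize (such as
// a memory ballast extension), records that size so it can be subtracted from
// reported heap usage, and then starts the periodic memory check.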
func (ml *memoryLimiter) start(_ context.Context, host component.Host) error {
	extensions := host.GetExtensions()
	for _, extension := range extensions {
		if ext, ok := extension.(interface{ GetBallastSize() uint64 }); ok {
			ml.ballastSize = ext.GetBallastSize()
			break
		}
	}

	ml.startMonitoring()
	return nil
}
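// shutdown is reference-counted to mirror startMonitoring: the same limiter
// instance is presumably shared by several signal pipelines, so the ticker is
// stopped only when the last user shuts down, and calling shutdown without a
// matching start returns errShutdownNotStarted.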
func (ml *memoryLimiter) shutdown(context.Context) error {
	ml.refCounterLock.Lock()
	defer ml.refCounterLock.Unlock()

	if ml.refCounter == 0 {
		return errShutdownNotStarted
	} else if ml.refCounter == 1 {
		ml.ticker.Stop()
	}
	ml.refCounter--
	return nil
}
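// The three process* funcs below are the per-signal hot paths: when mustRefuse
// is set they return the batch unmodified together with errDataRefused, relying
// on the upstream component (normally a receiver) to back off and retry.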
func (ml *memoryLimiter) processTraces(ctx context.Context, td ptrace.Traces) (ptrace.Traces, error) {
	numSpans := td.SpanCount()
	if ml.mustRefuse.Load() {
		// TODO: To be 100% sure that this is "refused" and not "dropped", it is
		// necessary to check the pipeline to see whether this processor is directly
		// connected to a receiver (i.e. a receiver is on the call stack). For now we
		// assume that the pipeline is properly configured, a receiver is on the call
		// stack, and the receiver will correctly retry the refused data.
		ml.obsrep.TracesRefused(ctx, numSpans)
		return td, errDataRefused
	}

	// Even if the next consumer returns an error, record the data as accepted by
	// this processor.
	ml.obsrep.TracesAccepted(ctx, numSpans)
	return td, nil
}
func (ml *memoryLimiter) processMetrics(ctx context.Context, md pmetric.Metrics) (pmetric.Metrics, error) {
	numDataPoints := md.DataPointCount()
	if ml.mustRefuse.Load() {
		// TODO: To be 100% sure that this is "refused" and not "dropped", it is
		// necessary to check the pipeline to see whether this processor is directly
		// connected to a receiver (i.e. a receiver is on the call stack). For now we
		// assume that the pipeline is properly configured and a receiver is on the
		// call stack.
		ml.obsrep.MetricsRefused(ctx, numDataPoints)
		return md, errDataRefused
	}

	// Even if the next consumer returns an error, record the data as accepted by
	// this processor.
	ml.obsrep.MetricsAccepted(ctx, numDataPoints)
	return md, nil
}
func (ml *memoryLimiter) processLogs(ctx context.Context, ld plog.Logs) (plog.Logs, error) {
	numRecords := ld.LogRecordCount()
	if ml.mustRefuse.Load() {
		// TODO: To be 100% sure that this is "refused" and not "dropped", it is
		// necessary to check the pipeline to see whether this processor is directly
		// connected to a receiver (i.e. a receiver is on the call stack). For now we
		// assume that the pipeline is properly configured and a receiver is on the
		// call stack.
		ml.obsrep.LogsRefused(ctx, numRecords)
		return ld, errDataRefused
	}

	// Even if the next consumer returns an error, record the data as accepted by
	// this processor.
	ml.obsrep.LogsAccepted(ctx, numRecords)
	return ld, nil
}
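// readMemStats reads the runtime memory statistics and subtracts the configured
// ballast size from ms.Alloc, so the limits are compared against the memory the
// collector is actually using. Illustrative numbers (not from this file): with
// Alloc = 900 MiB and a 683 MiB ballast, the usage checked against the limits
// is 217 MiB.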
func (ml *memoryLimiter) readMemStats() *runtime.MemStats {
	ms := &runtime.MemStats{}
	ml.readMemStatsFn(ms)
	// If properly configured, ms.Alloc should be at least ml.ballastSize, but since
	// a misconfiguration is possible, check for that here.
	if ms.Alloc >= ml.ballastSize {
		ms.Alloc -= ml.ballastSize
	} else if !ml.configMismatchedLogged {
		// This indicates misconfiguration. Log it once.
		ml.configMismatchedLogged = true
		ml.logger.Warn(`"size_mib" in ballast extension is likely incorrectly configured.`)
	}

	return ms
}
// startMonitoring starts a single ticker'd goroutine per instance
// that will check memory usage every checkInterval period.
func (ml *memoryLimiter) startMonitoring() {
	ml.refCounterLock.Lock()
	defer ml.refCounterLock.Unlock()

	ml.refCounter++
	if ml.refCounter == 1 {
		go func() {
			for range ml.ticker.C {
				ml.checkMemLimits()
			}
		}()
	}
}
func memstatToZapField(ms *runtime.MemStats) zap.Field {
	return zap.Uint64("cur_mem_mib", ms.Alloc/mibBytes)
}

func (ml *memoryLimiter) doGCandReadMemStats() *runtime.MemStats {
	runtime.GC()
	ml.lastGCDone = time.Now()
	ms := ml.readMemStats()
	ml.logger.Info("Memory usage after GC.", memstatToZapField(ms))
	return ms
}
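// checkMemLimits runs once per tick. In outline: read the (ballast-adjusted)
// memory stats; if above the hard limit, force a GC; if usage newly crosses the
// soft limit and no GC ran within minGCIntervalWhenSoftLimited, force one more
// GC before deciding; finally store mustRefuse so the process* funcs start (or
// stop) refusing data.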
func (ml *memoryLimiter) checkMemLimits() {
	ms := ml.readMemStats()

	ml.logger.Debug("Currently used memory.", memstatToZapField(ms))

	if ml.usageChecker.aboveHardLimit(ms) {
		ml.logger.Warn("Memory usage is above hard limit. Forcing a GC.", memstatToZapField(ms))
		ms = ml.doGCandReadMemStats()
	}

	// Remember current state.
	wasRefusing := ml.mustRefuse.Load()

	// Check if the memory usage is above the soft limit.
	mustRefuse := ml.usageChecker.aboveSoftLimit(ms)

	if wasRefusing && !mustRefuse {
		// Was previously refusing but enough memory is available now, no need to limit.
		ml.logger.Info("Memory usage back within limits. Resuming normal operation.", memstatToZapField(ms))
	}

	if !wasRefusing && mustRefuse {
		// We are above soft limit, do a GC if it wasn't done recently and see if
		// it brings memory usage below the soft limit.
		if time.Since(ml.lastGCDone) > minGCIntervalWhenSoftLimited {
			ml.logger.Info("Memory usage is above soft limit. Forcing a GC.", memstatToZapField(ms))
			ms = ml.doGCandReadMemStats()

			// Check the limit again to see if GC helped.
			mustRefuse = ml.usageChecker.aboveSoftLimit(ms)
		}

		if mustRefuse {
			ml.logger.Warn("Memory usage is above soft limit. Refusing data.", memstatToZapField(ms))
		}
	}

	ml.mustRefuse.Store(mustRefuse)
}
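// memUsageChecker encodes the two thresholds used above: the hard limit is
// memAllocLimit and the soft limit is memAllocLimit - memSpikeLimit. For example
// (illustrative values only), with memAllocLimit = 4000 MiB and memSpikeLimit =
// 800 MiB, data is refused once adjusted Alloc reaches 3200 MiB and a GC is
// forced once it reaches 4000 MiB.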
type memUsageChecker struct {
	memAllocLimit uint64
	memSpikeLimit uint64
}

func (d memUsageChecker) aboveSoftLimit(ms *runtime.MemStats) bool {
	return ms.Alloc >= d.memAllocLimit-d.memSpikeLimit
}

func (d memUsageChecker) aboveHardLimit(ms *runtime.MemStats) bool {
	return ms.Alloc >= d.memAllocLimit
}
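// newFixedMemUsageChecker builds a checker from absolute byte limits. When the
// spike limit is left at zero it defaults to 20% of the allocation limit; for
// instance (illustrative), a 4000 MiB limit with no spike limit yields an
// 800 MiB spike limit and therefore a 3200 MiB soft limit.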
func newFixedMemUsageChecker(memAllocLimit, memSpikeLimit uint64) *memUsageChecker {
	if memSpikeLimit == 0 {
		// If spike limit is unspecified use 20% of mem limit.
		memSpikeLimit = memAllocLimit / 5
	}
	return &memUsageChecker{
		memAllocLimit: memAllocLimit,
		memSpikeLimit: memSpikeLimit,
	}
}
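// newPercentageMemUsageChecker converts percentage-based limits into absolute
// ones relative to total system memory. For example (illustrative), on a host
// with 16 GiB of memory, a 75% limit and a 25% spike limit translate to a
// 12 GiB hard limit, a 4 GiB spike limit, and therefore an 8 GiB soft limit.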
func newPercentageMemUsageChecker(totalMemory uint64, percentageLimit, percentageSpike uint64) *memUsageChecker {
	return newFixedMemUsageChecker(percentageLimit*totalMemory/100, percentageSpike*totalMemory/100)
}