Use cache instead of map in the plugin promwrapper #14800

Merged: 9 commits, Nov 4, 2024
Changes from 7 commits
5 changes: 5 additions & 0 deletions .changeset/tricky-candles-matter.md
@@ -0,0 +1,5 @@
---
"chainlink": patch
---

#bugfix Memory leak fix in promwrapper
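
The changeset summarizes the fix: the promwrapper previously recorded per-round end times in `sync.Map` fields that were only deleted when the following OCR phase ran for the same timestamp, so entries for rounds that never progressed accumulated without bound. The diff below swaps those maps for `github.com/patrickmn/go-cache` caches whose entries expire on their own. A minimal standalone sketch of that pattern, using a hypothetical `roundKey` string instead of the real `types.ReportTimestamp` key:

```go
package main

import (
	"fmt"
	"time"

	"github.com/patrickmn/go-cache"
)

func main() {
	// Entries expire after 30 minutes and are purged every 5 minutes,
	// mirroring the defaults introduced in this PR.
	endTimes := cache.New(30*time.Minute, 5*time.Minute)

	roundKey := "epoch-1-round-7" // hypothetical key; the real code JSON-encodes a ReportTimestamp

	// Record the end time of one phase using the cache's default expiration.
	endTimes.SetDefault(roundKey, time.Now().UTC())

	// Later, in the next phase, read it back to compute a latency and drop it.
	if v, ok := endTimes.Get(roundKey); ok {
		latency := time.Since(v.(time.Time))
		fmt.Println("phase-to-phase latency:", latency)
		endTimes.Delete(roundKey)
	}
	// If the next phase never runs for this round, the entry is evicted after
	// 30 minutes instead of lingering forever, which a plain sync.Map cannot do.
}
```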
75 changes: 51 additions & 24 deletions core/services/ocr2/plugins/promwrapper/plugin.go
@@ -5,17 +5,26 @@ package promwrapper

import (
"context"
"encoding/json"
"fmt"
"math/big"
"sync"
"time"

"github.com/ethereum/go-ethereum/common"
"github.com/patrickmn/go-cache"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/smartcontractkit/libocr/offchainreporting2plus/types"
)

const (
// DefaultExpiration is the default expiration time for cache items.
DefaultExpiration = 30 * time.Minute

// DefaultCleanupInterval is the default interval for cache cleanup.
DefaultCleanupInterval = 5 * time.Minute
)

// Type assertions, buckets and labels.
var (
_ types.ReportingPlugin = &promPlugin{}
@@ -160,10 +169,10 @@ type (
chainID *big.Int
oracleID string
configDigest string
queryEndTimes sync.Map
observationEndTimes sync.Map
reportEndTimes sync.Map
acceptFinalizedReportEndTimes sync.Map
queryEndTimes *cache.Cache
observationEndTimes *cache.Cache
reportEndTimes *cache.Cache
acceptFinalizedReportEndTimes *cache.Cache
prometheusBackend PrometheusBackend
}
)
@@ -223,13 +232,17 @@ func New(
}

return &promPlugin{
wrapped: plugin,
name: name,
chainType: chainType,
chainID: chainID,
oracleID: fmt.Sprintf("%d", config.OracleID),
configDigest: common.Bytes2Hex(config.ConfigDigest[:]),
prometheusBackend: prometheusBackend,
wrapped: plugin,
name: name,
chainType: chainType,
chainID: chainID,
oracleID: fmt.Sprintf("%d", config.OracleID),
configDigest: common.Bytes2Hex(config.ConfigDigest[:]),
prometheusBackend: prometheusBackend,
queryEndTimes: cache.New(DefaultExpiration, DefaultCleanupInterval),
observationEndTimes: cache.New(DefaultExpiration, DefaultCleanupInterval),
reportEndTimes: cache.New(DefaultExpiration, DefaultCleanupInterval),
acceptFinalizedReportEndTimes: cache.New(DefaultExpiration, DefaultCleanupInterval),
}
}

@@ -238,7 +251,7 @@ func (p *promPlugin) Query(ctx context.Context, timestamp types.ReportTimestamp)
defer func() {
duration := float64(time.Now().UTC().Sub(start))
p.prometheusBackend.SetQueryDuration(getLabelsValues(p, timestamp), duration)
p.queryEndTimes.Store(timestamp, time.Now().UTC()) // note time at end of Query()
p.setEndTime(timestamp, p.queryEndTimes) // note time at end of Query()
}()

return p.wrapped.Query(ctx, timestamp)
@@ -247,19 +260,20 @@ func (p *promPlugin) Query(ctx context.Context, timestamp types.ReportTimestamp)
func (p *promPlugin) Observation(ctx context.Context, timestamp types.ReportTimestamp, query types.Query) (types.Observation, error) {
start := time.Now().UTC()

key := timestampToKey(timestamp)
// Report latency between Query() and Observation().
labelValues := getLabelsValues(p, timestamp)
if queryEndTime, ok := p.queryEndTimes.Load(timestamp); ok {
if queryEndTime, ok := p.queryEndTimes.Get(key); ok {
latency := float64(start.Sub(queryEndTime.(time.Time)))
p.prometheusBackend.SetQueryToObservationLatency(labelValues, latency)
p.queryEndTimes.Delete(timestamp)
p.queryEndTimes.Delete(key)
}

// Report latency for Observation() at end of call.
defer func() {
duration := float64(time.Now().UTC().Sub(start))
p.prometheusBackend.SetObservationDuration(labelValues, duration)
p.observationEndTimes.Store(timestamp, time.Now().UTC()) // note time at end of Observe()
p.setEndTime(timestamp, p.observationEndTimes) // note time at end of Observe()
}()

return p.wrapped.Observation(ctx, timestamp, query)
@@ -268,19 +282,20 @@ func (p *promPlugin) Observation(ctx context.Context, timestamp types.ReportTime
func (p *promPlugin) Report(ctx context.Context, timestamp types.ReportTimestamp, query types.Query, observations []types.AttributedObservation) (bool, types.Report, error) {
start := time.Now().UTC()

key := timestampToKey(timestamp)
// Report latency between Observation() and Report().
labelValues := getLabelsValues(p, timestamp)
if observationEndTime, ok := p.observationEndTimes.Load(timestamp); ok {
if observationEndTime, ok := p.observationEndTimes.Get(key); ok {
latency := float64(start.Sub(observationEndTime.(time.Time)))
p.prometheusBackend.SetObservationToReportLatency(labelValues, latency)
p.observationEndTimes.Delete(timestamp)
p.observationEndTimes.Delete(key)
}

// Report latency for Report() at end of call.
defer func() {
duration := float64(time.Now().UTC().Sub(start))
p.prometheusBackend.SetReportDuration(labelValues, duration)
p.reportEndTimes.Store(timestamp, time.Now().UTC()) // note time at end of Report()
p.setEndTime(timestamp, p.reportEndTimes) // note time at end of Report()
}()

return p.wrapped.Report(ctx, timestamp, query, observations)
@@ -289,19 +304,20 @@ func (p *promPlugin) Report(ctx context.Context, timestamp types.ReportTimestamp
func (p *promPlugin) ShouldAcceptFinalizedReport(ctx context.Context, timestamp types.ReportTimestamp, report types.Report) (bool, error) {
start := time.Now().UTC()

key := timestampToKey(timestamp)
// Report latency between Report() and ShouldAcceptFinalizedReport().
labelValues := getLabelsValues(p, timestamp)
if reportEndTime, ok := p.reportEndTimes.Load(timestamp); ok {
if reportEndTime, ok := p.reportEndTimes.Get(key); ok {
latency := float64(start.Sub(reportEndTime.(time.Time)))
p.prometheusBackend.SetReportToAcceptFinalizedReportLatency(labelValues, latency)
p.reportEndTimes.Delete(timestamp)
p.reportEndTimes.Delete(key)
}

// Report latency for ShouldAcceptFinalizedReport() at end of call.
defer func() {
duration := float64(time.Now().UTC().Sub(start))
p.prometheusBackend.SetShouldAcceptFinalizedReportDuration(labelValues, duration)
p.acceptFinalizedReportEndTimes.Store(timestamp, time.Now().UTC()) // note time at end of ShouldAcceptFinalizedReport()
p.setEndTime(timestamp, p.acceptFinalizedReportEndTimes) // note time at end of ShouldAcceptFinalizedReport()
}()

return p.wrapped.ShouldAcceptFinalizedReport(ctx, timestamp, report)
@@ -310,12 +326,13 @@ func (p *promPlugin) ShouldAcceptFinalizedReport(ctx context.Context, timestamp
func (p *promPlugin) ShouldTransmitAcceptedReport(ctx context.Context, timestamp types.ReportTimestamp, report types.Report) (bool, error) {
start := time.Now().UTC()

key := timestampToKey(timestamp)
// Report latency between ShouldAcceptFinalizedReport() and ShouldTransmitAcceptedReport().
labelValues := getLabelsValues(p, timestamp)
if acceptFinalizedReportEndTime, ok := p.acceptFinalizedReportEndTimes.Load(timestamp); ok {
if acceptFinalizedReportEndTime, ok := p.acceptFinalizedReportEndTimes.Get(key); ok {
latency := float64(start.Sub(acceptFinalizedReportEndTime.(time.Time)))
p.prometheusBackend.SetAcceptFinalizedReportToTransmitAcceptedReportLatency(labelValues, latency)
p.acceptFinalizedReportEndTimes.Delete(timestamp)
p.acceptFinalizedReportEndTimes.Delete(key)
}

defer func() {
@@ -343,3 +360,13 @@ func (p *promPlugin) Close() error {

return p.wrapped.Close()
}

func (p *promPlugin) setEndTime(timestamp types.ReportTimestamp, cache *cache.Cache) {
cache.SetDefault(timestampToKey(timestamp), time.Now().UTC())
}

func timestampToKey(timestamp types.ReportTimestamp) string {
jsonData, _ := json.Marshal(timestamp)
key := string(jsonData)
return key
}
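
One consequence of the switch is that go-cache keys must be strings, whereas `sync.Map` could key directly on the `types.ReportTimestamp` struct; hence the `timestampToKey` helper that JSON-encodes the timestamp. A small sketch of the same idea with a stand-in struct (the real `types.ReportTimestamp` lives in libocr and is not reproduced here):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// reportTimestamp is a stand-in for libocr's types.ReportTimestamp,
// which bundles a config digest, epoch, and round.
type reportTimestamp struct {
	ConfigDigest [32]byte `json:"configDigest"`
	Epoch        uint32   `json:"epoch"`
	Round        uint8    `json:"round"`
}

// timestampToKey mirrors the helper added in this PR: marshal the struct
// to JSON and use the resulting string as the cache key. Marshalling a
// fixed-shape struct like this cannot fail, so the error is ignored.
func timestampToKey(ts reportTimestamp) string {
	b, _ := json.Marshal(ts)
	return string(b)
}

func main() {
	ts := reportTimestamp{Epoch: 1, Round: 7}
	// The same epoch, round, and digest always produce the same key,
	// so a later phase can look up the entry stored by an earlier one.
	fmt.Println(timestampToKey(ts))
}
```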
38 changes: 19 additions & 19 deletions core/services/ocr2/plugins/promwrapper/plugin_test.go
@@ -69,12 +69,12 @@ func TestPlugin_MustInstantiate(t *testing.T) {
// Ensure instantiation without panic for no override backend.
var reportingPlugin = &fakeReportingPlugin{}
promPlugin := New(reportingPlugin, "test", "EVM", big.NewInt(1), types.ReportingPluginConfig{}, nil)
require.NotEqual(t, nil, promPlugin)
require.NotNil(t, promPlugin)

// Ensure instantiation without panic for override provided.
backend := mocks.NewPrometheusBackend(t)
promPlugin = New(reportingPlugin, "test-2", "EVM", big.NewInt(1), types.ReportingPluginConfig{}, backend)
require.NotEqual(t, nil, promPlugin)
require.NotNil(t, promPlugin)
}

func TestPlugin_GetLatencies(t *testing.T) {
@@ -194,45 +194,45 @@
types.ReportingPluginConfig{ConfigDigest: reportTimestamp.ConfigDigest},
backend,
).(*promPlugin)
require.NotEqual(t, nil, promPlugin)
require.NotNil(t, promPlugin)

ctx := testutils.Context(t)

// Run OCR methods.
_, err := promPlugin.Query(ctx, reportTimestamp)
require.NoError(t, err)
_, ok := promPlugin.queryEndTimes.Load(reportTimestamp)
require.Equal(t, true, ok)
_, ok := promPlugin.queryEndTimes.Get(timestampToKey(reportTimestamp))
require.True(t, ok)
time.Sleep(qToOLatency)

_, err = promPlugin.Observation(ctx, reportTimestamp, nil)
require.NoError(t, err)
_, ok = promPlugin.queryEndTimes.Load(reportTimestamp)
require.Equal(t, false, ok)
_, ok = promPlugin.observationEndTimes.Load(reportTimestamp)
require.Equal(t, true, ok)
_, ok = promPlugin.queryEndTimes.Get(timestampToKey(reportTimestamp))
require.False(t, ok)
_, ok = promPlugin.observationEndTimes.Get(timestampToKey(reportTimestamp))
require.True(t, ok)
time.Sleep(oToRLatency)

_, _, err = promPlugin.Report(ctx, reportTimestamp, nil, nil)
require.NoError(t, err)
_, ok = promPlugin.observationEndTimes.Load(reportTimestamp)
require.Equal(t, false, ok)
_, ok = promPlugin.reportEndTimes.Load(reportTimestamp)
require.Equal(t, true, ok)
_, ok = promPlugin.observationEndTimes.Get(timestampToKey(reportTimestamp))
require.False(t, ok)
_, ok = promPlugin.reportEndTimes.Get(timestampToKey(reportTimestamp))
require.True(t, ok)
time.Sleep(rToALatency)

_, err = promPlugin.ShouldAcceptFinalizedReport(ctx, reportTimestamp, nil)
require.NoError(t, err)
_, ok = promPlugin.reportEndTimes.Load(reportTimestamp)
require.Equal(t, false, ok)
_, ok = promPlugin.acceptFinalizedReportEndTimes.Load(reportTimestamp)
require.Equal(t, true, ok)
_, ok = promPlugin.reportEndTimes.Get(timestampToKey(reportTimestamp))
require.False(t, ok)
_, ok = promPlugin.acceptFinalizedReportEndTimes.Get(timestampToKey(reportTimestamp))
require.True(t, ok)
time.Sleep(aToTLatency)

_, err = promPlugin.ShouldTransmitAcceptedReport(ctx, reportTimestamp, nil)
require.NoError(t, err)
_, ok = promPlugin.acceptFinalizedReportEndTimes.Load(reportTimestamp)
require.Equal(t, false, ok)
_, ok = promPlugin.acceptFinalizedReportEndTimes.Get(timestampToKey(reportTimestamp))
require.False(t, ok)

// Close.
err = promPlugin.Close()
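
The updated tests verify that each phase's end time is present immediately after the phase runs and removed once the next phase consumes it. The other property the cache adds, which is the point of the fix, is that entries nobody ever consumes still disappear after the expiration window. A standalone sketch of that behavior with a deliberately short expiration (the test name and timings here are illustrative, not part of the PR):

```go
package promwrapper_test

import (
	"testing"
	"time"

	"github.com/patrickmn/go-cache"
	"github.com/stretchr/testify/require"
)

func TestCacheEntriesExpire(t *testing.T) {
	// Very short expiration so the test runs quickly; the production code
	// uses 30 minutes with a 5 minute cleanup interval.
	c := cache.New(50*time.Millisecond, 10*time.Millisecond)

	c.SetDefault("orphaned-round", time.Now().UTC())
	_, ok := c.Get("orphaned-round")
	require.True(t, ok)

	// After the expiration window, Get no longer returns the entry, even
	// before the background janitor has physically removed it.
	time.Sleep(100 * time.Millisecond)
	_, ok = c.Get("orphaned-round")
	require.False(t, ok)
}
```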