Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
42 changes: 42 additions & 0 deletions collector/cache.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,42 @@
// Copyright (c) 2025, Oracle and/or its affiliates.
// Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.

package collector

import (
"github.com/prometheus/client_golang/prometheus"
"time"
)

// NewMetricsCache builds a MetricsCache with one empty cache record per
// metric. Each record starts with no cached prometheus metrics and a nil
// LastScraped timestamp, so every metric is eligible for its first scrape.
func NewMetricsCache(metrics []*Metric) *MetricsCache {
	// Pre-size the map: exactly one record per metric is inserted below.
	c := make(map[*Metric]*MetricCacheRecord, len(metrics))

	for _, metric := range metrics {
		c[metric] = &MetricCacheRecord{
			PrometheusMetrics: map[string]prometheus.Metric{},
			LastScraped:       nil,
		}
	}
	return &MetricsCache{
		cache: c,
	}
}

// SetLastScraped records the collector tick at which the given metric was
// last computed.
func (c *MetricsCache) SetLastScraped(m *Metric, tick *time.Time) {
	record := c.cache[m]
	record.LastScraped = tick
}

// GetLastScraped returns the collector tick at which the given metric was
// last computed, or nil if it has not been scraped since the cache was built.
func (c *MetricsCache) GetLastScraped(m *Metric) *time.Time {
	record := c.cache[m]
	return record.LastScraped
}

// SendAll forwards every cached prometheus metric for m to the collector
// channel, re-exposing previously computed values without re-scraping.
func (c *MetricsCache) SendAll(ch chan<- prometheus.Metric, m *Metric) {
	cached := c.cache[m].PrometheusMetrics
	for _, metric := range cached {
		ch <- metric
	}
}

// CacheAndSend stores the computed prometheus metric in the cache for m,
// keyed by its descriptor string, and forwards it to the collector channel.
func (c *MetricsCache) CacheAndSend(ch chan<- prometheus.Metric, m *Metric, metric prometheus.Metric) {
	key := metric.Desc().String()
	c.cache[m].PrometheusMetrics[key] = metric
	ch <- metric
}
20 changes: 13 additions & 7 deletions collector/collector.go
Original file line number Diff line number Diff line change
Expand Up @@ -104,11 +104,10 @@ func NewExporter(logger *slog.Logger, m *MetricsConfiguration) *Exporter {
logger: logger,
MetricsConfiguration: m,
databases: databases,
lastScraped: map[string]*time.Time{},
allConstLabels: allConstLabels,
}
e.metricsToScrape = e.DefaultMetrics()

e.initCache()
return e
}

Expand Down Expand Up @@ -253,7 +252,7 @@ func (e *Exporter) scrapeDatabase(ch chan<- prometheus.Metric, errChan chan<- er
go func() {
// If the metric doesn't need to be scraped, send the cached values
if !isScrapeMetric {
metric.sendAll(ch)
d.MetricsCache.SendAll(ch, metric)
errChan <- nil
return
}
Expand Down Expand Up @@ -416,6 +415,7 @@ func (e *Exporter) reloadMetrics() {
} else {
e.logger.Debug("No custom metrics defined.")
}
e.initCache()
}

// ScrapeMetric is an interface method to call scrapeGenericValues using Metric struct values
Expand Down Expand Up @@ -475,9 +475,9 @@ func (e *Exporter) scrapeGenericValues(d *Database, ch chan<- prometheus.Metric,
}
buckets[lelimit] = counter
}
m.cacheAndSend(ch, prometheus.MustNewConstHistogram(desc, count, value, buckets, labelsValues...))
d.MetricsCache.CacheAndSend(ch, m, prometheus.MustNewConstHistogram(desc, count, value, buckets, labelsValues...))
} else {
m.cacheAndSend(ch, prometheus.MustNewConstMetric(desc, getMetricType(metric, m.MetricsType), value, labelsValues...))
d.MetricsCache.CacheAndSend(ch, m, prometheus.MustNewConstMetric(desc, getMetricType(metric, m.MetricsType), value, labelsValues...))
}
// If no labels, use metric name
} else {
Expand Down Expand Up @@ -509,9 +509,9 @@ func (e *Exporter) scrapeGenericValues(d *Database, ch chan<- prometheus.Metric,
}
buckets[lelimit] = counter
}
m.cacheAndSend(ch, prometheus.MustNewConstHistogram(desc, count, value, buckets))
d.MetricsCache.CacheAndSend(ch, m, prometheus.MustNewConstHistogram(desc, count, value, buckets))
} else {
m.cacheAndSend(ch, prometheus.MustNewConstMetric(desc, getMetricType(metric, m.MetricsType), value))
d.MetricsCache.CacheAndSend(ch, m, prometheus.MustNewConstMetric(desc, getMetricType(metric, m.MetricsType), value))
}
}
metricsCount++
Expand Down Expand Up @@ -578,6 +578,12 @@ func (e *Exporter) generatePrometheusMetrics(d *Database, parse func(row map[str
return nil
}

// initCache rebuilds the per-database metrics cache for the current set of
// metrics to scrape. Called on startup and after metrics are reloaded.
func (e *Exporter) initCache() {
	metrics := e.metricsToScrape.Metric
	for _, db := range e.databases {
		db.initCache(metrics)
	}
}

func getMetricType(metricType string, metricsType map[string]string) prometheus.ValueType {
var strToPromType = map[string]prometheus.ValueType{
"gauge": prometheus.GaugeValue,
Expand Down
5 changes: 5 additions & 0 deletions collector/database.go
Original file line number Diff line number Diff line change
Expand Up @@ -86,6 +86,11 @@ func NewDatabase(logger *slog.Logger, dbname string, dbconfig DatabaseConfig) *D
}
}

// initCache resets the metrics cache for this database, discarding any
// previously cached prometheus metrics and last-scraped timestamps.
// Used on startup and when metrics are reloaded.
func (d *Database) initCache(metrics []*Metric) {
	d.MetricsCache = NewMetricsCache(metrics)
}

// WarmupConnectionPool serially acquires connections to "warm up" the connection pool.
// This is a workaround for a perceived bug in ODPI_C where rapid acquisition of connections
// results in a SIGABRT.
Expand Down
5 changes: 2 additions & 3 deletions collector/metrics.go
Original file line number Diff line number Diff line change
Expand Up @@ -28,14 +28,13 @@ func (e *Exporter) isScrapeMetric(tick *time.Time, metric *Metric, d *Database)
if !ok {
return true
}
id := metric.id(d.Name)
lastScraped := e.lastScraped[id]
lastScraped := d.MetricsCache.GetLastScraped(metric)
shouldScrape := lastScraped == nil ||
// If the metric's scrape interval is less than the time elapsed since the last scrape,
// we should scrape the metric.
interval < tick.Sub(*lastScraped)
if shouldScrape {
e.lastScraped[id] = tick
d.MetricsCache.SetLastScraped(metric, lastScraped)
}
return shouldScrape
}
Expand Down
71 changes: 31 additions & 40 deletions collector/types.go
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,6 @@ import (
	"log/slog"
	"sort"
	"strings"
	"sync"
	"time"

	"github.com/godror/godror/dsn"
	"github.com/prometheus/client_golang/prometheus"
)
Expand All @@ -23,7 +22,6 @@ type Exporter struct {
scrapeResults []prometheus.Metric
databases []*Database
logger *slog.Logger
lastScraped map[string]*time.Time
allConstLabels []string
}

Expand All @@ -33,6 +31,26 @@ type Database struct {
Session *sql.DB
Type float64
Config DatabaseConfig
// MetricsCache holds computed metrics for a database, so these metrics are available on each scrape.
// Given a metric's scrape configuration, it may not be computed on the same interval as other metrics.
MetricsCache *MetricsCache
}

// MetricsCache holds, per Metric, the cached prometheus metric values and
// the time the metric was last computed.
type MetricsCache struct {
	// The outer map is to be initialized at startup, and when metrics are reloaded.
	// Read access is concurrent, write access is (and must) be from a single thread.
	// NOTE(review): record fields and the inner PrometheusMetrics maps are
	// mutated from scrape goroutines — confirm each Metric is only touched by
	// one goroutine at a time.
	cache map[*Metric]*MetricCacheRecord
}

// MetricCacheRecord stores metadata associated with a given Metric.
// As one metric may have multiple prometheus.Metric representations,
// these are cached as a map value keyed by the metric's descriptor string.
type MetricCacheRecord struct {
	// PrometheusMetrics stores cached prometheus metric values.
	// Used when custom scrape intervals are used, and the metric must be returned to the collector, but not scraped.
	PrometheusMetrics map[string]prometheus.Metric
	// LastScraped is the collector tick time when the metric was last computed.
	// It is nil until the metric has been scraped at least once.
	LastScraped *time.Time
}

type Config struct {
Expand All @@ -57,18 +75,17 @@ type Config struct {

// Metric is an object description: it defines how one configured metric is
// queried, typed, labeled, and how often it is scraped.
type Metric struct {
	Context string   // metric context/namespace, part of the exported metric name
	Labels  []string // label names attached to the exported metrics
	// MetricsDesc maps a result field name to its help text.
	MetricsDesc map[string]string
	// MetricsType maps a field name to its prometheus value type
	// (e.g. "gauge", "counter", "histogram"); consumed by getMetricType.
	MetricsType map[string]string
	// MetricsBuckets holds histogram bucket definitions per field
	// (le-limit string -> count string) for fields exported as histograms.
	MetricsBuckets map[string]map[string]string
	FieldToAppend  string
	// Request is the query executed against the database to produce rows
	// for this metric — presumably SQL; verify against scrape callers.
	Request          string
	IgnoreZeroResult bool
	QueryTimeout     string
	// ScrapeInterval is an optional custom scrape interval; when set, cached
	// values are re-sent between scrapes (see isScrapeMetric).
	ScrapeInterval string
	// Databases restricts which databases this metric applies to —
	// presumably matched by database name; confirm against caller.
	Databases []string
}

// Metrics is a container structure for prometheus metrics
Expand All @@ -78,29 +95,3 @@ type Metrics struct {

// ScrapeContext is currently an empty struct — presumably a placeholder for
// per-scrape state; NOTE(review): confirm intended use before extending.
type ScrapeContext struct {
}

// id returns a cache identifier for this metric on the given database,
// combining the database name, the metric context, and the metric
// description strings.
func (m *Metric) id(dbname string) string {
	builder := strings.Builder{}
	builder.WriteString(dbname)
	builder.WriteString(m.Context)
	// Go map iteration order is random; iterate MetricsDesc in sorted key
	// order so the id is deterministic across calls. Without this, repeated
	// calls can produce different ids for the same metric, breaking any
	// lookup keyed on the id.
	keys := make([]string, 0, len(m.MetricsDesc))
	for k := range m.MetricsDesc {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	for _, k := range keys {
		builder.WriteString(m.MetricsDesc[k])
	}
	return builder.String()
}

// sendAll pushes every cached prometheus metric for this metric onto the
// collector channel.
func (m *Metric) sendAll(ch chan<- prometheus.Metric) {
	for _, cached := range m.PrometheusMetrics {
		ch <- cached
	}
}

// cacheAndSend caches the metric under its descriptor string and sends it
// to the collector.
func (m *Metric) cacheAndSend(ch chan<- prometheus.Metric, metric prometheus.Metric) {
	// Lazily initialize the cache map. Check for nil rather than len == 0:
	// the original len check would also replace an existing-but-empty map,
	// allocating a new one unnecessarily.
	if m.PrometheusMetrics == nil {
		m.PrometheusMetrics = map[string]prometheus.Metric{}
	}
	m.PrometheusMetrics[metric.Desc().String()] = metric
	ch <- metric
}