Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
55 changes: 27 additions & 28 deletions collector/collector.go
Original file line number Diff line number Diff line change
Expand Up @@ -248,11 +248,15 @@ func (e *Exporter) scrapeDatabase(ch chan<- prometheus.Metric, errChan chan<- er
metricsToScrape := 0
for _, metric := range e.metricsToScrape.Metric {
metric := metric //https://golang.org/doc/faq#closures_and_goroutines
if !e.isScrapeMetric(tick, metric, d) {
continue
}
isScrapeMetric := e.isScrapeMetric(tick, metric, d)
metricsToScrape++
go func() {
// If the metric doesn't need to be scraped, send the cached values
if !isScrapeMetric {
metric.sendAll(ch)
errChan <- nil
return
}
e.logger.Debug("About to scrape metric",
"Context", metric.Context,
"MetricsDesc", fmt.Sprint(metric.MetricsDesc),
Expand Down Expand Up @@ -389,7 +393,7 @@ func hashFile(h hash.Hash, fn string) error {

func (e *Exporter) reloadMetrics() {
// Truncate metricsToScrape
e.metricsToScrape.Metric = []Metric{}
e.metricsToScrape.Metric = []*Metric{}

// Load default metrics
defaultMetrics := e.DefaultMetrics()
Expand All @@ -415,29 +419,24 @@ func (e *Exporter) reloadMetrics() {
}

// ScrapeMetric is an interface method to call scrapeGenericValues using Metric struct values
func (e *Exporter) ScrapeMetric(d *Database, ch chan<- prometheus.Metric, m Metric) error {
func (e *Exporter) ScrapeMetric(d *Database, ch chan<- prometheus.Metric, m *Metric) error {
e.logger.Debug("Calling function ScrapeGenericValues()")
queryTimeout := e.getQueryTimeout(m, d)
return e.scrapeGenericValues(d, ch, m.Context, m.Labels, m.MetricsDesc,
m.MetricsType, m.MetricsBuckets, m.FieldToAppend, m.IgnoreZeroResult,
m.Request, queryTimeout)
return e.scrapeGenericValues(d, ch, m)
}

// generic method for retrieving metrics.
func (e *Exporter) scrapeGenericValues(d *Database, ch chan<- prometheus.Metric, context string, labels []string,
metricsDesc map[string]string, metricsType map[string]string, metricsBuckets map[string]map[string]string,
fieldToAppend string, ignoreZeroResult bool, request string, queryTimeout time.Duration) error {
func (e *Exporter) scrapeGenericValues(d *Database, ch chan<- prometheus.Metric, m *Metric) error {
metricsCount := 0
constLabels := d.constLabels(e.constLabels())
e.logger.Debug("labels", constLabels)
genericParser := func(row map[string]string) error {
// Construct labels value
labelsValues := []string{}
for _, label := range labels {
for _, label := range m.Labels {
labelsValues = append(labelsValues, row[label])
}
// Construct Prometheus values to sent back
for metric, metricHelp := range metricsDesc {
for metric, metricHelp := range m.MetricsDesc {
value, ok := e.parseFloat(metric, metricHelp, row)
if !ok {
// Skip invalid metric values
Expand All @@ -446,22 +445,22 @@ func (e *Exporter) scrapeGenericValues(d *Database, ch chan<- prometheus.Metric,
e.logger.Debug("Query result",
"value", value)
// If metric do not use a field content in metric's name
if strings.Compare(fieldToAppend, "") == 0 {
if strings.Compare(m.FieldToAppend, "") == 0 {
desc := prometheus.NewDesc(
prometheus.BuildFQName(namespace, context, metric),
prometheus.BuildFQName(namespace, m.Context, metric),
metricHelp,
labels,
m.Labels,
constLabels,
)
if metricsType[strings.ToLower(metric)] == "histogram" {
if m.MetricsType[strings.ToLower(metric)] == "histogram" {
count, err := strconv.ParseUint(strings.TrimSpace(row["count"]), 10, 64)
if err != nil {
e.logger.Error("Unable to convert count value to int (metric=" + metric +
",metricHelp=" + metricHelp + ",value=<" + row["count"] + ">)")
continue
}
buckets := make(map[float64]uint64)
for field, le := range metricsBuckets[metric] {
for field, le := range m.MetricsBuckets[metric] {
lelimit, err := strconv.ParseFloat(strings.TrimSpace(le), 64)
if err != nil {
e.logger.Error("Unable to convert bucket limit value to float (metric=" + metric +
Expand All @@ -476,26 +475,26 @@ func (e *Exporter) scrapeGenericValues(d *Database, ch chan<- prometheus.Metric,
}
buckets[lelimit] = counter
}
ch <- prometheus.MustNewConstHistogram(desc, count, value, buckets, labelsValues...)
m.cacheAndSend(ch, prometheus.MustNewConstHistogram(desc, count, value, buckets, labelsValues...))
} else {
ch <- prometheus.MustNewConstMetric(desc, getMetricType(metric, metricsType), value, labelsValues...)
m.cacheAndSend(ch, prometheus.MustNewConstMetric(desc, getMetricType(metric, m.MetricsType), value, labelsValues...))
}
// If no labels, use metric name
} else {
desc := prometheus.NewDesc(
prometheus.BuildFQName(namespace, context, cleanName(row[fieldToAppend])),
prometheus.BuildFQName(namespace, m.Context, cleanName(row[m.FieldToAppend])),
metricHelp,
nil, constLabels,
)
if metricsType[strings.ToLower(metric)] == "histogram" {
if m.MetricsType[strings.ToLower(metric)] == "histogram" {
count, err := strconv.ParseUint(strings.TrimSpace(row["count"]), 10, 64)
if err != nil {
e.logger.Error("Unable to convert count value to int (metric=" + metric +
",metricHelp=" + metricHelp + ",value=<" + row["count"] + ">)")
continue
}
buckets := make(map[float64]uint64)
for field, le := range metricsBuckets[metric] {
for field, le := range m.MetricsBuckets[metric] {
lelimit, err := strconv.ParseFloat(strings.TrimSpace(le), 64)
if err != nil {
e.logger.Error("Unable to convert bucket limit value to float (metric=" + metric +
Expand All @@ -510,22 +509,22 @@ func (e *Exporter) scrapeGenericValues(d *Database, ch chan<- prometheus.Metric,
}
buckets[lelimit] = counter
}
ch <- prometheus.MustNewConstHistogram(desc, count, value, buckets)
m.cacheAndSend(ch, prometheus.MustNewConstHistogram(desc, count, value, buckets))
} else {
ch <- prometheus.MustNewConstMetric(desc, getMetricType(metric, metricsType), value)
m.cacheAndSend(ch, prometheus.MustNewConstMetric(desc, getMetricType(metric, m.MetricsType), value))
}
}
metricsCount++
}
return nil
}
e.logger.Debug("Calling function GeneratePrometheusMetrics()")
err := e.generatePrometheusMetrics(d, genericParser, request, queryTimeout)
err := e.generatePrometheusMetrics(d, genericParser, m.Request, e.getQueryTimeout(m, d))
e.logger.Debug("ScrapeGenericValues() - metricsCount: " + strconv.Itoa(metricsCount))
if err != nil {
return err
}
if !ignoreZeroResult && metricsCount == 0 {
if !m.IgnoreZeroResult && metricsCount == 0 {
// a zero result error is returned for caller error identification.
// https://github.com/oracle/oracle-db-appdev-monitoring/issues/168
return newZeroResultError()
Expand Down
4 changes: 2 additions & 2 deletions collector/metrics.go
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ import (
// isScrapeMetric returns true if a metric should be scraped. Metrics may not be scraped if they have a custom scrape interval,
// and the time since the last scrape is less than the custom scrape interval.
// If there is no tick time or last known tick, the metric is always scraped.
func (e *Exporter) isScrapeMetric(tick *time.Time, metric Metric, d *Database) bool {
func (e *Exporter) isScrapeMetric(tick *time.Time, metric *Metric, d *Database) bool {
if len(metric.Databases) > 0 {
if !slices.Contains(metric.Databases, d.Name) {
return false
Expand Down Expand Up @@ -52,7 +52,7 @@ func (e *Exporter) getScrapeInterval(context, scrapeInterval string) (time.Durat
return 0, false
}

func (e *Exporter) getQueryTimeout(metric Metric, d *Database) time.Duration {
func (e *Exporter) getQueryTimeout(metric *Metric, d *Database) time.Duration {
if len(metric.QueryTimeout) > 0 {
qt, err := time.ParseDuration(metric.QueryTimeout)
if err != nil {
Expand Down
53 changes: 35 additions & 18 deletions collector/types.go
Original file line number Diff line number Diff line change
Expand Up @@ -28,11 +28,11 @@ type Exporter struct {
}

type Database struct {
Name string
Up float64
Session *sql.DB
Type float64
Config DatabaseConfig
Name string
Up float64
Session *sql.DB
Type float64
Config DatabaseConfig
}

type Config struct {
Expand All @@ -57,28 +57,29 @@ type Config struct {

// Metric is an object description
type Metric struct {
Context string
Labels []string
MetricsDesc map[string]string
MetricsType map[string]string
MetricsBuckets map[string]map[string]string
FieldToAppend string
Request string
IgnoreZeroResult bool
QueryTimeout string
ScrapeInterval string
Databases []string
Context string
Labels []string
MetricsDesc map[string]string
MetricsType map[string]string
MetricsBuckets map[string]map[string]string
FieldToAppend string
Request string
IgnoreZeroResult bool
QueryTimeout string
ScrapeInterval string
Databases []string
PrometheusMetrics map[string]prometheus.Metric
}

// Metrics is a container structure for prometheus metrics
type Metrics struct {
Metric []Metric
Metric []*Metric
}

// ScrapeContext is a placeholder for per-scrape state.
// It currently carries no fields; it exists so scrape-related APIs have a
// stable type to extend later without changing signatures.
type ScrapeContext struct {
}

func (m Metric) id(dbname string) string {
func (m *Metric) id(dbname string) string {
builder := strings.Builder{}
builder.WriteString(dbname)
builder.WriteString(m.Context)
Expand All @@ -87,3 +88,19 @@ func (m Metric) id(dbname string) string {
}
return builder.String()
}

// sendAll replays every cached prometheus metric onto ch.
// It is used when a metric is skipped on the current tick (custom scrape
// interval) so the collector still reports the last known values.
// Iterating a nil map is a no-op, so an empty cache is safe.
func (m *Metric) sendAll(ch chan<- prometheus.Metric) {
	for _, cached := range m.PrometheusMetrics {
		ch <- cached
	}
}

// cacheAndSend stores metric in the per-Metric cache, keyed by its
// descriptor string, and forwards it to ch. The cache lets sendAll replay
// the most recent values when this metric is not re-scraped on a tick.
func (m *Metric) cacheAndSend(ch chan<- prometheus.Metric, metric prometheus.Metric) {
	// Lazily initialize the cache: writing to a nil map panics. Checking
	// for nil (rather than len == 0) avoids re-allocating an existing,
	// merely empty map.
	if m.PrometheusMetrics == nil {
		m.PrometheusMetrics = make(map[string]prometheus.Metric)
	}
	m.PrometheusMetrics[metric.Desc().String()] = metric
	ch <- metric
}
9 changes: 8 additions & 1 deletion custom-metrics-example/custom-metrics.toml
Original file line number Diff line number Diff line change
Expand Up @@ -16,38 +16,45 @@ from gv$sql
where last_active_time >= sysdate - 5/(24*60)
'''

# User segment queries may return zero rows on certain database configurations
[[metric]]
context = "size_user_segments_top100"
metricsdesc = {table_bytes="Gauge metric with the size of the tables in user segments."}
labels = ["segment_name"]
request = "select * from (select segment_name,sum(bytes) as table_bytes from user_segments where segment_type='TABLE' group by segment_name) order by table_bytes DESC FETCH NEXT 100 ROWS ONLY"
ignorezeroresult = true

[[metric]]
context = "size_user_segments_top100"
metricsdesc = {table_partition_bytes="Gauge metric with the size of the table partition in user segments."}
labels = ["segment_name"]
request = "select * from (select segment_name,sum(bytes) as table_partition_bytes from user_segments where segment_type='TABLE PARTITION' group by segment_name) order by table_partition_bytes DESC FETCH NEXT 100 ROWS ONLY"
ignorezeroresult = true

[[metric]]
context = "size_user_segments_top100"
metricsdesc = {cluster_bytes="Gauge metric with the size of the cluster in user segments."}
labels = ["segment_name"]
request = "select * from (select segment_name,sum(bytes) as cluster_bytes from user_segments where segment_type='CLUSTER' group by segment_name) order by cluster_bytes DESC FETCH NEXT 100 ROWS ONLY"
ignorezeroresult = true

[[metric]]
context = "size_dba_segments_top100"
metricsdesc = {table_bytes="Gauge metric with the size of the tables in user segments."}
labels = ["segment_name"]
request = "select * from (select segment_name,sum(bytes) as table_bytes from dba_segments where segment_type='TABLE' group by segment_name) order by table_bytes DESC FETCH NEXT 100 ROWS ONLY"
ignorezeroresult = true

[[metric]]
context = "size_dba_segments_top100"
metricsdesc = {table_partition_bytes="Gauge metric with the size of the table partition in user segments."}
labels = ["segment_name"]
request = "select * from (select segment_name,sum(bytes) as table_partition_bytes from dba_segments where segment_type='TABLE PARTITION' group by segment_name) order by table_partition_bytes DESC FETCH NEXT 100 ROWS ONLY"
ignorezeroresult = true

[[metric]]
context = "size_dba_segments_top100"
metricsdesc = {cluster_bytes="Gauge metric with the size of the cluster in user segments."}
labels = ["segment_name"]
request = "select * from (select segment_name,sum(bytes) as cluster_bytes from dba_segments where segment_type='CLUSTER' group by segment_name) order by cluster_bytes DESC FETCH NEXT 100 ROWS ONLY"
request = "select * from (select segment_name,sum(bytes) as cluster_bytes from dba_segments where segment_type='CLUSTER' group by segment_name) order by cluster_bytes DESC FETCH NEXT 100 ROWS ONLY"
ignorezeroresult = true
2 changes: 1 addition & 1 deletion docs/404.html
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
<meta charset="UTF-8">
<meta name="generator" content="Docusaurus v3.8.1">
<title data-rh="true">Page Not Found | Oracle Database Metrics Exporter</title><meta data-rh="true" name="viewport" content="width=device-width,initial-scale=1"><meta data-rh="true" name="twitter:card" content="summary_large_image"><meta data-rh="true" property="og:image" content="https://oracle.github.io/oracle-db-appdev-monitoring/img/logo.png"><meta data-rh="true" name="twitter:image" content="https://oracle.github.io/oracle-db-appdev-monitoring/img/logo.png"><meta data-rh="true" property="og:url" content="https://oracle.github.io/oracle-db-appdev-monitoring/404.html"><meta data-rh="true" property="og:locale" content="en"><meta data-rh="true" name="docusaurus_locale" content="en"><meta data-rh="true" name="docusaurus_tag" content="default"><meta data-rh="true" name="docsearch:language" content="en"><meta data-rh="true" name="docsearch:docusaurus_tag" content="default"><meta data-rh="true" property="og:title" content="Page Not Found | Oracle Database Metrics Exporter"><link data-rh="true" rel="icon" href="/oracle-db-appdev-monitoring/img/favicon-32x32.png"><link data-rh="true" rel="canonical" href="https://oracle.github.io/oracle-db-appdev-monitoring/404.html"><link data-rh="true" rel="alternate" href="https://oracle.github.io/oracle-db-appdev-monitoring/404.html" hreflang="en"><link data-rh="true" rel="alternate" href="https://oracle.github.io/oracle-db-appdev-monitoring/404.html" hreflang="x-default"><link rel="stylesheet" href="/oracle-db-appdev-monitoring/assets/css/styles.d329a656.css">
<script src="/oracle-db-appdev-monitoring/assets/js/runtime~main.f4fbb5be.js" defer="defer"></script>
<script src="/oracle-db-appdev-monitoring/assets/js/runtime~main.17989aec.js" defer="defer"></script>
<script src="/oracle-db-appdev-monitoring/assets/js/main.dfeca7cc.js" defer="defer"></script>
</head>
<body class="navigation-with-keyboard">
Expand Down
1 change: 1 addition & 0 deletions docs/assets/js/c539bf3f.35853f87.js

Large diffs are not rendered by default.

1 change: 0 additions & 1 deletion docs/assets/js/c539bf3f.7f73b018.js

This file was deleted.

1 change: 1 addition & 0 deletions docs/assets/js/runtime~main.17989aec.js

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Loading