Commit

Support for prometheus scrape timeout in probe endpoint (#828)
Signed-off-by: Martin Montes <martin11lrx@gmail.com>
mmontes11 authored Apr 10, 2024
1 parent bc8e542 commit f3d4ccd
Showing 3 changed files with 131 additions and 19 deletions.
57 changes: 38 additions & 19 deletions mysqld_exporter.go
@@ -15,6 +15,7 @@ package main

import (
"context"
"fmt"
"net/http"
"os"
"strconv"
@@ -127,6 +128,32 @@ func filterScrapers(scrapers []collector.Scraper, collectParams []string) []coll
return filteredScrapers
}

func getScrapeTimeoutSeconds(r *http.Request, offset float64) (float64, error) {
var timeoutSeconds float64
if v := r.Header.Get("X-Prometheus-Scrape-Timeout-Seconds"); v != "" {
var err error
timeoutSeconds, err = strconv.ParseFloat(v, 64)
if err != nil {
return 0, fmt.Errorf("failed to parse timeout from Prometheus header: %v", err)
}
}
if timeoutSeconds == 0 {
return 0, nil
}
if timeoutSeconds < 0 {
return 0, fmt.Errorf("timeout value from Prometheus header is invalid: %f", timeoutSeconds)
}

if offset >= timeoutSeconds {
// Ignore timeout offset if it doesn't leave time to scrape.
return 0, fmt.Errorf("timeout offset (%f) should be lower than prometheus scrape timeout (%f)", offset, timeoutSeconds)
} else {
// Subtract timeout offset from timeout.
timeoutSeconds -= offset
}
return timeoutSeconds, nil
}

func init() {
prometheus.MustRegister(version.NewCollector("mysqld_exporter"))
}
@@ -155,25 +182,17 @@ func newHandler(scrapers []collector.Scraper, logger log.Logger) http.HandlerFun
// Use request context for cancellation when connection gets closed.
ctx := r.Context()
// If a timeout is configured via the Prometheus header, add it to the context.
if v := r.Header.Get("X-Prometheus-Scrape-Timeout-Seconds"); v != "" {
timeoutSeconds, err := strconv.ParseFloat(v, 64)
if err != nil {
level.Error(logger).Log("msg", "Failed to parse timeout from Prometheus header", "err", err)
} else {
if *timeoutOffset >= timeoutSeconds {
// Ignore timeout offset if it doesn't leave time to scrape.
level.Error(logger).Log("msg", "Timeout offset should be lower than prometheus scrape timeout", "offset", *timeoutOffset, "prometheus_scrape_timeout", timeoutSeconds)
} else {
// Subtract timeout offset from timeout.
timeoutSeconds -= *timeoutOffset
}
// Create new timeout context with request context as parent.
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, time.Duration(timeoutSeconds*float64(time.Second)))
defer cancel()
// Overwrite request with timeout context.
r = r.WithContext(ctx)
}
timeoutSeconds, err := getScrapeTimeoutSeconds(r, *timeoutOffset)
if err != nil {
level.Error(logger).Log("msg", "Error getting timeout from Prometheus header", "err", err)
}
if timeoutSeconds > 0 {
// Create new timeout context with request context as parent.
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, time.Duration(timeoutSeconds*float64(time.Second)))
defer cancel()
// Overwrite request with timeout context.
r = r.WithContext(ctx)
}

filteredScrapers := filterScrapers(scrapers, collect)
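
For context, here is a minimal standalone sketch of the timeout plumbing end to end: a toy handler applies the same header-parse-and-offset logic that getScrapeTimeoutSeconds and newHandler use above, and the client plays the role of Prometheus by sending X-Prometheus-Scrape-Timeout-Seconds. The handler body, the offset value, and the URL are illustrative assumptions, not code from this commit.

package main

import (
	"context"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"os"
	"strconv"
	"time"
)

func main() {
	// Stand-in for the exporter's timeout-offset setting (hypothetical value).
	const timeoutOffset = 0.25 // seconds

	// Toy handler mirroring newHandler's behaviour: read the Prometheus header,
	// subtract the offset, and bound the request context with the result.
	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ctx := r.Context()
		if v := r.Header.Get("X-Prometheus-Scrape-Timeout-Seconds"); v != "" {
			if timeoutSeconds, err := strconv.ParseFloat(v, 64); err == nil && timeoutSeconds > timeoutOffset {
				var cancel context.CancelFunc
				ctx, cancel = context.WithTimeout(ctx, time.Duration((timeoutSeconds-timeoutOffset)*float64(time.Second)))
				defer cancel()
			}
		}
		if deadline, ok := ctx.Deadline(); ok {
			fmt.Fprintf(w, "scrape budget: %s\n", time.Until(deadline).Round(time.Millisecond))
		} else {
			fmt.Fprintln(w, "no scrape deadline")
		}
	})

	srv := httptest.NewServer(handler)
	defer srv.Close()

	// Prometheus sets this header to the scrape_timeout of the scraping job.
	req, err := http.NewRequest(http.MethodGet, srv.URL+"/metrics", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("X-Prometheus-Scrape-Timeout-Seconds", "10")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	io.Copy(os.Stdout, resp.Body) // prints something like "scrape budget: 9.75s"
}

In the exporter, the context bounded this way is what the handler passes down to the scrapers, so a slow collector is cancelled shortly before Prometheus would give up on the scrape.
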
77 changes: 77 additions & 0 deletions mysqld_exporter_test.go
@@ -304,3 +304,80 @@ func Test_filterScrapers(t *testing.T) {
})
}
}

func Test_getScrapeTimeoutSeconds(t *testing.T) {
type args struct {
timeoutHeader string
offset float64
}
tests := []struct {
name string
args args
wantTimeout float64
wantErr bool
}{
{"no_timeout_header",
args{},
0, false,
},
{"zero_timeout_header",
args{
timeoutHeader: "0",
},
0, false,
},
{"negative_timeout_header",
args{
timeoutHeader: "-5",
},
0, true,
},
{"offset_greater_than_timeout",
args{
timeoutHeader: "5",
offset: 6,
},
0, true,
},
{"offset_equal_timeout",
args{
timeoutHeader: "5",
offset: 5,
},
0, true,
},
{"offset_less_than_timeout",
args{
timeoutHeader: "5",
offset: 1,
},
4, false,
},
{"no_offset",
args{
timeoutHeader: "5",
},
5, false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
request, err := http.NewRequest(http.MethodGet, "", nil)
if err != nil {
t.Fatalf("unexpected error creating http request: %v", err)
}
request.Header.Set("X-Prometheus-Scrape-Timeout-Seconds", tt.args.timeoutHeader)

timeout, err := getScrapeTimeoutSeconds(request, tt.args.offset)
if err != nil && !tt.wantErr {
t.Fatalf("unexpected error: %v", err)
}
if err == nil && tt.wantErr {
t.Fatal("expecting an error, got nil")
}
if timeout != tt.wantTimeout {
t.Fatalf("unexpected timeout, got '%f' but expected '%f'", timeout, tt.wantTimeout)
}
})
}
}
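
One case the table above leaves out is a fractional header value; Prometheus formats the timeout as a float, so sub-second timeouts and offsets are legal inputs. A hypothetical companion test (not part of this commit) might look like this:

func Test_getScrapeTimeoutSeconds_fractional(t *testing.T) {
	request, err := http.NewRequest(http.MethodGet, "", nil)
	if err != nil {
		t.Fatalf("unexpected error creating http request: %v", err)
	}
	// Prometheus sends the timeout as a float, so fractional seconds are valid.
	request.Header.Set("X-Prometheus-Scrape-Timeout-Seconds", "2.5")

	timeout, err := getScrapeTimeoutSeconds(request, 0.5)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if timeout != 2.0 {
		t.Fatalf("unexpected timeout, got '%f' but expected '%f'", timeout, 2.0)
	}
}
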
16 changes: 16 additions & 0 deletions probe.go
@@ -14,8 +14,10 @@
package main

import (
"context"
"fmt"
"net/http"
"time"

"github.com/go-kit/log"
"github.com/go-kit/log/level"
@@ -54,6 +56,20 @@ func handleProbe(scrapers []collector.Scraper, logger log.Logger) http.HandlerFu
return
}

// If a timeout is configured via the Prometheus header, add it to the context.
timeoutSeconds, err := getScrapeTimeoutSeconds(r, *timeoutOffset)
if err != nil {
level.Error(logger).Log("msg", "Error getting timeout from Prometheus header", "err", err)
}
if timeoutSeconds > 0 {
// Create new timeout context with request context as parent.
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, time.Duration(timeoutSeconds*float64(time.Second)))
defer cancel()
// Overwrite request with timeout context.
r = r.WithContext(ctx)
}

filteredScrapers := filterScrapers(scrapers, collectParams)

registry := prometheus.NewRegistry()
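
To exercise the new behaviour on the multi-target /probe endpoint, a client passes the scrape target as a query parameter and advertises its timeout in the same Prometheus header, which the handler above now turns into a context deadline. A rough sketch of such a request follows; the host, port, and target value are placeholders, and the parameter name follows the exporter's multi-target convention.

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
	"os"
	"time"
)

func main() {
	// Placeholder target: the MySQL instance the probe should scrape.
	params := url.Values{}
	params.Set("target", "mysql-primary:3306")

	req, err := http.NewRequest(http.MethodGet, "http://localhost:9104/probe?"+params.Encode(), nil)
	if err != nil {
		panic(err)
	}
	// Prometheus sends its scrape_timeout with every scrape; with this change
	// the probe handler derives its context deadline from it as well.
	req.Header.Set("X-Prometheus-Scrape-Timeout-Seconds", "10")

	client := &http.Client{Timeout: 15 * time.Second}
	resp, err := client.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	fmt.Println("status:", resp.Status)
	io.Copy(os.Stdout, resp.Body)
}
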
