>
+
+ for c := 0; c < 256; c++ {
+ var t octetType
+ isCtl := c <= 31 || c == 127
+ isChar := 0 <= c && c <= 127
+ isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c))
+ if strings.ContainsRune(" \t\r\n", rune(c)) {
+ t |= isSpace
+ }
+ if isChar && !isCtl && !isSeparator {
+ t |= isToken
+ }
+ octetTypes[c] = t
+ }
+}
+
+// AcceptSpec describes an Accept* header.
+type AcceptSpec struct {
+ Value string
+ Q float64
+}
+
+// ParseAccept parses Accept* headers.
+func ParseAccept(header http.Header, key string) (specs []AcceptSpec) {
+loop:
+ for _, s := range header[key] {
+ for {
+ var spec AcceptSpec
+ spec.Value, s = expectTokenSlash(s)
+ if spec.Value == "" {
+ continue loop
+ }
+ spec.Q = 1.0
+ s = skipSpace(s)
+ if strings.HasPrefix(s, ";") {
+ s = skipSpace(s[1:])
+ if !strings.HasPrefix(s, "q=") {
+ continue loop
+ }
+ spec.Q, s = expectQuality(s[2:])
+ if spec.Q < 0.0 {
+ continue loop
+ }
+ }
+ specs = append(specs, spec)
+ s = skipSpace(s)
+ if !strings.HasPrefix(s, ",") {
+ continue loop
+ }
+ s = skipSpace(s[1:])
+ }
+ }
+ return
+}
+
+func skipSpace(s string) (rest string) {
+ i := 0
+ for ; i < len(s); i++ {
+ if octetTypes[s[i]]&isSpace == 0 {
+ break
+ }
+ }
+ return s[i:]
+}
+
+func expectTokenSlash(s string) (token, rest string) {
+ i := 0
+ for ; i < len(s); i++ {
+ b := s[i]
+ if (octetTypes[b]&isToken == 0) && b != '/' {
+ break
+ }
+ }
+ return s[:i], s[i:]
+}
+
+func expectQuality(s string) (q float64, rest string) {
+ switch {
+ case len(s) == 0:
+ return -1, ""
+ case s[0] == '0':
+ q = 0
+ case s[0] == '1':
+ q = 1
+ default:
+ return -1, ""
+ }
+ s = s[1:]
+ if !strings.HasPrefix(s, ".") {
+ return q, s
+ }
+ s = s[1:]
+ i := 0
+ n := 0
+ d := 1
+ for ; i < len(s); i++ {
+ b := s[i]
+ if b < '0' || b > '9' {
+ break
+ }
+ n = n*10 + int(b) - '0'
+ d *= 10
+ }
+ return q + float64(n)/float64(d), s[i:]
+}
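
Taken together, these helpers implement a small subset of RFC 7231 quality values: a q parameter must start with "0" or "1", optionally followed by a fractional part. A minimal behavior sketch (hypothetical input; note this package sits under internal/, so it is only importable from within client_golang itself):

	h := http.Header{"Accept-Encoding": {"gzip;q=0.8, identity;q=0.5, *;q=0"}}
	for _, spec := range header.ParseAccept(h, "Accept-Encoding") {
		fmt.Printf("%s %.1f\n", spec.Value, spec.Q)
	}
	// Output: gzip 0.8, identity 0.5, * 0.0.
	// A value without a q parameter defaults to Q=1.0; a malformed q
	// (e.g. "q=2") makes ParseAccept skip the rest of that header value.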
diff --git a/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go
new file mode 100644
index 00000000000..2e45780b74b
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go
@@ -0,0 +1,36 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file or at
+// https://developers.google.com/open-source/licenses/bsd.
+
+package httputil
+
+import (
+ "net/http"
+
+ "github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header"
+)
+
+// NegotiateContentEncoding returns the best offered content encoding for the
+// request's Accept-Encoding header. If two offers match with equal weight,
+// then the offer earlier in the list is preferred. If no offers are
+// acceptable, then "" is returned.
+func NegotiateContentEncoding(r *http.Request, offers []string) string {
+ bestOffer := "identity"
+ bestQ := -1.0
+ specs := header.ParseAccept(r.Header, "Accept-Encoding")
+ for _, offer := range offers {
+ for _, spec := range specs {
+ if spec.Q > bestQ &&
+ (spec.Value == "*" || spec.Value == offer) {
+ bestQ = spec.Q
+ bestOffer = offer
+ }
+ }
+ }
+ if bestQ == 0 {
+ bestOffer = ""
+ }
+ return bestOffer
+}
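
promhttp uses this function to pick the Content-Encoding of scrape responses. A hedged usage sketch (the handler and the gzip wiring are illustrative only, not part of this change):

	func handler(w http.ResponseWriter, r *http.Request) {
		// Offers are tried in order; on equal weight the earlier one wins.
		switch httputil.NegotiateContentEncoding(r, []string{"gzip", "identity"}) {
		case "gzip":
			w.Header().Set("Content-Encoding", "gzip")
			gz := gzip.NewWriter(w)
			defer gz.Close()
			fmt.Fprintln(gz, "compressed body")
		case "":
			// The client refused every offer, e.g. "identity;q=0, *;q=0".
			http.Error(w, "no acceptable encoding", http.StatusNotAcceptable)
		default:
			fmt.Fprintln(w, "plain body")
		}
	}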
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/README.md b/vendor/github.com/prometheus/client_golang/prometheus/README.md
index 44986bff06b..c67ff1b7fa3 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/README.md
+++ b/vendor/github.com/prometheus/client_golang/prometheus/README.md
@@ -1 +1 @@
-See [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus).
+See [![Go Reference](https://pkg.go.dev/badge/github.com/prometheus/client_golang/prometheus.svg)](https://pkg.go.dev/github.com/prometheus/client_golang/prometheus).
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/build_info_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/build_info_collector.go
new file mode 100644
index 00000000000..450189f35ec
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/build_info_collector.go
@@ -0,0 +1,38 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import "runtime/debug"
+
+// NewBuildInfoCollector is the obsolete version of collectors.NewBuildInfoCollector.
+// See there for documentation.
+//
+// Deprecated: Use collectors.NewBuildInfoCollector instead.
+func NewBuildInfoCollector() Collector {
+ path, version, sum := "unknown", "unknown", "unknown"
+ if bi, ok := debug.ReadBuildInfo(); ok {
+ path = bi.Main.Path
+ version = bi.Main.Version
+ sum = bi.Main.Sum
+ }
+ c := &selfCollector{MustNewConstMetric(
+ NewDesc(
+ "go_build_info",
+ "Build information about the main Go module.",
+ nil, Labels{"path": path, "version": version, "checksum": sum},
+ ),
+ GaugeValue, 1)}
+ c.init(c.self)
+ return c
+}
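
The collector itself is unchanged from its previous home in go_collector.go (see the removal further down); only the deprecation pointer to the collectors package is new. Usage sketch:

	reg := prometheus.NewRegistry()
	reg.MustRegister(prometheus.NewBuildInfoCollector()) // deprecated alias
	// Exposes: go_build_info{path="...", version="...", checksum="..."} 1
	// All three labels fall back to "unknown" when debug.ReadBuildInfo has
	// nothing to report (e.g. binaries built without module support).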
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go
index 1e839650d4d..cf05079fb82 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/collector.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/collector.go
@@ -69,9 +69,9 @@ type Collector interface {
// If a Collector collects the same metrics throughout its lifetime, its
// Describe method can simply be implemented as:
//
-// func (c customCollector) Describe(ch chan<- *Desc) {
-// DescribeByCollect(c, ch)
-// }
+// func (c customCollector) Describe(ch chan<- *Desc) {
+// DescribeByCollect(c, ch)
+// }
//
// However, this will not work if the metrics collected change dynamically over
// the lifetime of the Collector in a way that their combined set of descriptors
@@ -118,3 +118,11 @@ func (c *selfCollector) Describe(ch chan<- *Desc) {
func (c *selfCollector) Collect(ch chan<- Metric) {
ch <- c.self
}
+
+// collectorMetric is a metric that is also a collector.
+// Because of selfCollector, most (if not all) Metrics in
+// this package are also collectors.
+type collectorMetric interface {
+ Metric
+ Collector
+}
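
The new collectorMetric interface is satisfied by every metric built on selfCollector. For reference, a minimal Collector using the DescribeByCollect pattern from the re-wrapped doc comment above (the queue metric is invented for illustration):

	type queueCollector struct {
		desc   *prometheus.Desc
		length func() float64 // hypothetical data source
	}

	func (c queueCollector) Describe(ch chan<- *prometheus.Desc) {
		prometheus.DescribeByCollect(c, ch) // valid: the metric set is static
	}

	func (c queueCollector) Collect(ch chan<- prometheus.Metric) {
		ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, c.length())
	}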
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
index 3f8fd790d66..4ce84e7a80e 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/counter.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
@@ -20,6 +20,7 @@ import (
"time"
dto "github.com/prometheus/client_model/go"
+ "google.golang.org/protobuf/types/known/timestamppb"
)
// Counter is a Metric that represents a single numerical value that only ever
@@ -51,7 +52,7 @@ type Counter interface {
// will lead to a valid (label-less) exemplar. But if Labels is nil, the current
// exemplar is left in place. AddWithExemplar panics if the value is < 0, if any
// of the provided labels are invalid, or if the provided labels contain more
-// than 64 runes in total.
+// than 128 runes in total.
type ExemplarAdder interface {
AddWithExemplar(value float64, exemplar Labels)
}
@@ -59,6 +60,18 @@ type ExemplarAdder interface {
// CounterOpts is an alias for Opts. See there for doc comments.
type CounterOpts Opts
+// CounterVecOpts bundles the options to create a CounterVec metric.
+// It is mandatory to set CounterOpts; see there for mandatory fields. VariableLabels
+// is optional and can safely be left to its default value.
+type CounterVecOpts struct {
+ CounterOpts
+
+ // VariableLabels are used to partition the metric vector by the given set
+ // of labels. Each label value will be constrained with the optional Constraint
+ // function, if provided.
+ VariableLabels ConstrainableLabels
+}
+
// NewCounter creates a new Counter based on the provided CounterOpts.
//
// The returned implementation also implements ExemplarAdder. It is safe to
@@ -78,8 +91,12 @@ func NewCounter(opts CounterOpts) Counter {
nil,
opts.ConstLabels,
)
- result := &counter{desc: desc, labelPairs: desc.constLabelPairs, now: time.Now}
+ if opts.now == nil {
+ opts.now = time.Now
+ }
+ result := &counter{desc: desc, labelPairs: desc.constLabelPairs, now: opts.now}
result.init(result) // Init self-collection.
+ result.createdTs = timestamppb.New(opts.now())
return result
}
@@ -94,10 +111,12 @@ type counter struct {
selfCollector
desc *Desc
+ createdTs *timestamppb.Timestamp
labelPairs []*dto.LabelPair
exemplar atomic.Value // Containing nil or a *dto.Exemplar.
- now func() time.Time // To mock out time.Now() for testing.
+ // now is for testing purposes, by default it's time.Now.
+ now func() time.Time
}
func (c *counter) Desc() *Desc {
@@ -133,17 +152,21 @@ func (c *counter) Inc() {
atomic.AddUint64(&c.valInt, 1)
}
-func (c *counter) Write(out *dto.Metric) error {
+func (c *counter) get() float64 {
fval := math.Float64frombits(atomic.LoadUint64(&c.valBits))
ival := atomic.LoadUint64(&c.valInt)
- val := fval + float64(ival)
+ return fval + float64(ival)
+}
+func (c *counter) Write(out *dto.Metric) error {
+ // Read the Exemplar first and the value second. This is to avoid a race condition
+ // where users see an exemplar for a not-yet-existing observation.
var exemplar *dto.Exemplar
if e := c.exemplar.Load(); e != nil {
exemplar = e.(*dto.Exemplar)
}
-
- return populateMetric(CounterValue, val, c.labelPairs, exemplar, out)
+ val := c.get()
+ return populateMetric(CounterValue, val, c.labelPairs, exemplar, out, c.createdTs)
}
func (c *counter) updateExemplar(v float64, l Labels) {
@@ -169,19 +192,31 @@ type CounterVec struct {
// NewCounterVec creates a new CounterVec based on the provided CounterOpts and
// partitioned by the given label names.
func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
- desc := NewDesc(
+ return V2.NewCounterVec(CounterVecOpts{
+ CounterOpts: opts,
+ VariableLabels: UnconstrainedLabels(labelNames),
+ })
+}
+
+// NewCounterVec creates a new CounterVec based on the provided CounterVecOpts.
+func (v2) NewCounterVec(opts CounterVecOpts) *CounterVec {
+ desc := V2.NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
- labelNames,
+ opts.VariableLabels,
opts.ConstLabels,
)
+ if opts.now == nil {
+ opts.now = time.Now
+ }
return &CounterVec{
MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
- if len(lvs) != len(desc.variableLabels) {
- panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs))
+ if len(lvs) != len(desc.variableLabels.names) {
+ panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, lvs))
}
- result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: time.Now}
+ result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: opts.now}
result.init(result) // Init self-collection.
+ result.createdTs = timestamppb.New(opts.now())
return result
}),
}
@@ -241,7 +276,8 @@ func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
// WithLabelValues works as GetMetricWithLabelValues, but panics where
// GetMetricWithLabelValues would have returned an error. Not returning an
// error allows shortcuts like
-// myVec.WithLabelValues("404", "GET").Add(42)
+//
+// myVec.WithLabelValues("404", "GET").Add(42)
func (v *CounterVec) WithLabelValues(lvs ...string) Counter {
c, err := v.GetMetricWithLabelValues(lvs...)
if err != nil {
@@ -252,7 +288,8 @@ func (v *CounterVec) WithLabelValues(lvs ...string) Counter {
// With works as GetMetricWith, but panics where GetMetricWithLabels would have
// returned an error. Not returning an error allows shortcuts like
-// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
+//
+// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
func (v *CounterVec) With(labels Labels) Counter {
c, err := v.GetMetricWith(labels)
if err != nil {
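
A hedged sketch of the new V2 constructor path (ConstrainedLabels and its Constraint field live in the package's labels.go, which is outside this hunk, so treat the exact shape as an assumption):

	requests := prometheus.V2.NewCounterVec(prometheus.CounterVecOpts{
		CounterOpts: prometheus.CounterOpts{
			Name: "http_requests_total",
			Help: "Total HTTP requests.",
		},
		VariableLabels: prometheus.ConstrainedLabels{
			// Constraint normalizes every incoming value for this label.
			{Name: "method", Constraint: strings.ToUpper},
		},
	})
	requests.WithLabelValues("get").Inc() // stored as method="GET"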
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
index 4bb816ab75a..68ffe3c2480 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
@@ -14,17 +14,16 @@
package prometheus
import (
- "errors"
"fmt"
"sort"
"strings"
"github.com/cespare/xxhash/v2"
- //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
- "github.com/golang/protobuf/proto"
+ dto "github.com/prometheus/client_model/go"
"github.com/prometheus/common/model"
+ "google.golang.org/protobuf/proto"
- dto "github.com/prometheus/client_model/go"
+ "github.com/prometheus/client_golang/prometheus/internal"
)
// Desc is the descriptor used by every Prometheus Metric. It is essentially
@@ -51,9 +50,9 @@ type Desc struct {
// constLabelPairs contains precalculated DTO label pairs based on
// the constant labels.
constLabelPairs []*dto.LabelPair
- // variableLabels contains names of labels for which the metric
- // maintains variable values.
- variableLabels []string
+ // variableLabels contains names of labels and normalization function for
+ // which the metric maintains variable values.
+ variableLabels *compiledLabels
// id is a hash of the values of the ConstLabels and fqName. This
// must be unique among all registered descriptors and can therefore be
// used as an identifier of the descriptor.
@@ -77,10 +76,24 @@ type Desc struct {
// For constLabels, the label values are constant. Therefore, they are fully
// specified in the Desc. See the Collector example for a usage pattern.
func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {
+ return V2.NewDesc(fqName, help, UnconstrainedLabels(variableLabels), constLabels)
+}
+
+// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
+// and will be reported at registration time. variableLabels and constLabels can
+// be nil if no such labels should be set. fqName must not be empty.
+//
+// variableLabels only contain the label names and normalization functions. Their
+// label values are variable and therefore not part of the Desc. (They are managed
+// within the Metric.)
+//
+// For constLabels, the label values are constant. Therefore, they are fully
+// specified in the Desc. See the Collector example for a usage pattern.
+func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, constLabels Labels) *Desc {
d := &Desc{
fqName: fqName,
help: help,
- variableLabels: variableLabels,
+ variableLabels: variableLabels.compile(),
}
if !model.IsValidMetricName(model.LabelValue(fqName)) {
d.err = fmt.Errorf("%q is not a valid metric name", fqName)
@@ -90,7 +103,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
// their sorted label names) plus the fqName (at position 0).
labelValues := make([]string, 1, len(constLabels)+1)
labelValues[0] = fqName
- labelNames := make([]string, 0, len(constLabels)+len(variableLabels))
+ labelNames := make([]string, 0, len(constLabels)+len(d.variableLabels.names))
labelNameSet := map[string]struct{}{}
// First add only the const label names and sort them...
for labelName := range constLabels {
@@ -115,16 +128,16 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
// Now add the variable label names, but prefix them with something that
// cannot be in a regular label name. That prevents matching the label
// dimension with a different mix between preset and variable labels.
- for _, labelName := range variableLabels {
- if !checkLabelName(labelName) {
- d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName)
+ for _, label := range d.variableLabels.names {
+ if !checkLabelName(label) {
+ d.err = fmt.Errorf("%q is not a valid label name for metric %q", label, fqName)
return d
}
- labelNames = append(labelNames, "$"+labelName)
- labelNameSet[labelName] = struct{}{}
+ labelNames = append(labelNames, "$"+label)
+ labelNameSet[label] = struct{}{}
}
if len(labelNames) != len(labelNameSet) {
- d.err = errors.New("duplicate label names")
+ d.err = fmt.Errorf("duplicate label names in constant and variable labels for metric %q", fqName)
return d
}
@@ -154,7 +167,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
Value: proto.String(v),
})
}
- sort.Sort(labelPairSorter(d.constLabelPairs))
+ sort.Sort(internal.LabelPairSorter(d.constLabelPairs))
return d
}
@@ -176,11 +189,19 @@ func (d *Desc) String() string {
fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()),
)
}
+ vlStrings := make([]string, 0, len(d.variableLabels.names))
+ for _, vl := range d.variableLabels.names {
+ if fn, ok := d.variableLabels.labelConstraints[vl]; ok && fn != nil {
+ vlStrings = append(vlStrings, fmt.Sprintf("c(%s)", vl))
+ } else {
+ vlStrings = append(vlStrings, vl)
+ }
+ }
return fmt.Sprintf(
- "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}",
+ "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: {%s}}",
d.fqName,
d.help,
strings.Join(lpStrings, ","),
- d.variableLabels,
+ strings.Join(vlStrings, ","),
)
}
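
The original NewDesc keeps its signature and simply wraps V2.NewDesc via UnconstrainedLabels, which adapts a plain []string to the ConstrainableLabels interface. Sketch:

	d := prometheus.V2.NewDesc(
		"queue_length",
		"Current number of items in the queue.",
		prometheus.UnconstrainedLabels([]string{"queue"}),
		prometheus.Labels{"shard": "0"}, // const labels, fixed in the Desc
	)
	fmt.Println(d) // a constrained label would render as c(<name>), per String above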
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
index 98450125d6a..962608f02c6 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/doc.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
@@ -21,55 +21,66 @@
// All exported functions and methods are safe to be used concurrently unless
// specified otherwise.
//
-// A Basic Example
+// # A Basic Example
//
// As a starting point, a very basic usage example:
//
-// package main
-//
-// import (
-// "log"
-// "net/http"
-//
-// "github.com/prometheus/client_golang/prometheus"
-// "github.com/prometheus/client_golang/prometheus/promhttp"
-// )
-//
-// var (
-// cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{
-// Name: "cpu_temperature_celsius",
-// Help: "Current temperature of the CPU.",
-// })
-// hdFailures = prometheus.NewCounterVec(
-// prometheus.CounterOpts{
-// Name: "hd_errors_total",
-// Help: "Number of hard-disk errors.",
-// },
-// []string{"device"},
-// )
-// )
-//
-// func init() {
-// // Metrics have to be registered to be exposed:
-// prometheus.MustRegister(cpuTemp)
-// prometheus.MustRegister(hdFailures)
-// }
-//
-// func main() {
-// cpuTemp.Set(65.3)
-// hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc()
-//
-// // The Handler function provides a default handler to expose metrics
-// // via an HTTP server. "/metrics" is the usual endpoint for that.
-// http.Handle("/metrics", promhttp.Handler())
-// log.Fatal(http.ListenAndServe(":8080", nil))
-// }
-//
+// package main
+//
+// import (
+// "log"
+// "net/http"
+//
+// "github.com/prometheus/client_golang/prometheus"
+// "github.com/prometheus/client_golang/prometheus/promhttp"
+// )
+//
+// type metrics struct {
+// cpuTemp prometheus.Gauge
+// hdFailures *prometheus.CounterVec
+// }
+//
+// func NewMetrics(reg prometheus.Registerer) *metrics {
+// m := &metrics{
+// cpuTemp: prometheus.NewGauge(prometheus.GaugeOpts{
+// Name: "cpu_temperature_celsius",
+// Help: "Current temperature of the CPU.",
+// }),
+// hdFailures: prometheus.NewCounterVec(
+// prometheus.CounterOpts{
+// Name: "hd_errors_total",
+// Help: "Number of hard-disk errors.",
+// },
+// []string{"device"},
+// ),
+// }
+// reg.MustRegister(m.cpuTemp)
+// reg.MustRegister(m.hdFailures)
+// return m
+// }
+//
+// func main() {
+// // Create a non-global registry.
+// reg := prometheus.NewRegistry()
+//
+// // Create new metrics and register them using the custom registry.
+// m := NewMetrics(reg)
+// // Set values for the newly created metrics.
+// m.cpuTemp.Set(65.3)
+// m.hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc()
+//
+// // Expose metrics and custom registry via an HTTP server
+// // using the HandlerFor function. "/metrics" is the usual endpoint for that.
+// http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg}))
+// log.Fatal(http.ListenAndServe(":8080", nil))
+// }
//
// This is a complete program that exports two metrics, a Gauge and a Counter,
// the latter with a label attached to turn it into a (one-dimensional) vector.
+// It registers the metrics using a custom registry and exposes them via an HTTP server
+// on the /metrics endpoint.
//
-// Metrics
+// # Metrics
//
// The number of exported identifiers in this package might appear a bit
// overwhelming. However, in addition to the basic plumbing shown in the example
@@ -100,7 +111,7 @@
// To create instances of Metrics and their vector versions, you need a suitable
// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, or HistogramOpts.
//
-// Custom Collectors and constant Metrics
+// # Custom Collectors and constant Metrics
//
// While you could create your own implementations of Metric, most likely you
// will only ever implement the Collector interface on your own. At a first
@@ -141,7 +152,7 @@
// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting
// shortcuts.
//
-// Advanced Uses of the Registry
+// # Advanced Uses of the Registry
//
// While MustRegister is the by far most common way of registering a Collector,
// sometimes you might want to handle the errors the registration might cause.
@@ -176,23 +187,23 @@
// NewProcessCollector). With a custom registry, you are in control and decide
// yourself about the Collectors to register.
//
-// HTTP Exposition
+// # HTTP Exposition
//
// The Registry implements the Gatherer interface. The caller of the Gather
// method can then expose the gathered metrics in some way. Usually, the metrics
// are served via HTTP on the /metrics endpoint. That's happening in the example
// above. The tools to expose metrics via HTTP are in the promhttp sub-package.
//
-// Pushing to the Pushgateway
+// # Pushing to the Pushgateway
//
// Function for pushing to the Pushgateway can be found in the push sub-package.
//
-// Graphite Bridge
+// # Graphite Bridge
//
// Functions and examples to push metrics from a Gatherer to Graphite can be
// found in the graphite sub-package.
//
-// Other Means of Exposition
+// # Other Means of Exposition
//
// More ways of exposing metrics can easily be added by following the approaches
// of the existing implementations.
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go
index c41ab37f3bb..de5a8562931 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go
@@ -48,7 +48,7 @@ func (e *expvarCollector) Collect(ch chan<- Metric) {
continue
}
var v interface{}
- labels := make([]string, len(desc.variableLabels))
+ labels := make([]string, len(desc.variableLabels.names))
if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil {
ch <- NewInvalidMetric(desc, err)
continue
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
index bd0733d6a7d..dd2eac94067 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
@@ -55,6 +55,18 @@ type Gauge interface {
// GaugeOpts is an alias for Opts. See there for doc comments.
type GaugeOpts Opts
+// GaugeVecOpts bundles the options to create a GaugeVec metric.
+// It is mandatory to set GaugeOpts; see there for mandatory fields. VariableLabels
+// is optional and can safely be left to its default value.
+type GaugeVecOpts struct {
+ GaugeOpts
+
+ // VariableLabels are used to partition the metric vector by the given set
+ // of labels. Each label value will be constrained with the optional Constraint
+ // function, if provided.
+ VariableLabels ConstrainableLabels
+}
+
// NewGauge creates a new Gauge based on the provided GaugeOpts.
//
// The returned implementation is optimized for a fast Set method. If you have a
@@ -123,7 +135,7 @@ func (g *gauge) Sub(val float64) {
func (g *gauge) Write(out *dto.Metric) error {
val := math.Float64frombits(atomic.LoadUint64(&g.valBits))
- return populateMetric(GaugeValue, val, g.labelPairs, nil, out)
+ return populateMetric(GaugeValue, val, g.labelPairs, nil, out, nil)
}
// GaugeVec is a Collector that bundles a set of Gauges that all share the same
@@ -138,16 +150,24 @@ type GaugeVec struct {
// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
// partitioned by the given label names.
func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
- desc := NewDesc(
+ return V2.NewGaugeVec(GaugeVecOpts{
+ GaugeOpts: opts,
+ VariableLabels: UnconstrainedLabels(labelNames),
+ })
+}
+
+// NewGaugeVec creates a new GaugeVec based on the provided GaugeVecOpts.
+func (v2) NewGaugeVec(opts GaugeVecOpts) *GaugeVec {
+ desc := V2.NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
- labelNames,
+ opts.VariableLabels,
opts.ConstLabels,
)
return &GaugeVec{
MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
- if len(lvs) != len(desc.variableLabels) {
- panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs))
+ if len(lvs) != len(desc.variableLabels.names) {
+ panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, lvs))
}
result := &gauge{desc: desc, labelPairs: MakeLabelPairs(desc, lvs)}
result.init(result) // Init self-collection.
@@ -210,7 +230,8 @@ func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
// WithLabelValues works as GetMetricWithLabelValues, but panics where
// GetMetricWithLabelValues would have returned an error. Not returning an
// error allows shortcuts like
-// myVec.WithLabelValues("404", "GET").Add(42)
+//
+// myVec.WithLabelValues("404", "GET").Add(42)
func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge {
g, err := v.GetMetricWithLabelValues(lvs...)
if err != nil {
@@ -221,7 +242,8 @@ func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge {
// With works as GetMetricWith, but panics where GetMetricWithLabels would have
// returned an error. Not returning an error allows shortcuts like
-// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
+//
+// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
func (v *GaugeVec) With(labels Labels) Gauge {
g, err := v.GetMetricWith(labels)
if err != nil {
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/get_pid.go b/vendor/github.com/prometheus/client_golang/prometheus/get_pid.go
new file mode 100644
index 00000000000..614fd61be95
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/get_pid.go
@@ -0,0 +1,26 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !js || wasm
+// +build !js wasm
+
+package prometheus
+
+import "os"
+
+func getPIDFn() func() (int, error) {
+ pid := os.Getpid()
+ return func() (int, error) {
+ return pid, nil
+ }
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go b/vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go
new file mode 100644
index 00000000000..eaf8059ee15
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go
@@ -0,0 +1,23 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build js && !wasm
+// +build js,!wasm
+
+package prometheus
+
+func getPIDFn() func() (int, error) {
+ return func() (int, error) {
+ return 1, nil
+ }
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
index a96ed1cee88..520cbd7d418 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
@@ -16,32 +16,198 @@ package prometheus
import (
"runtime"
"runtime/debug"
- "sync"
"time"
)
-type goCollector struct {
+// goRuntimeMemStats provides the metrics initially provided by runtime.ReadMemStats.
+// From Go 1.17, similar (and better) statistics are provided by runtime/metrics, so
+// while the eval closures still operate on runtime.MemStats, from Go 1.17+ the
+// struct is populated using runtime/metrics. These are defaults we can't alter.
+func goRuntimeMemStats() memStatsMetrics {
+ return memStatsMetrics{
+ {
+ desc: NewDesc(
+ memstatNamespace("alloc_bytes"),
+ "Number of bytes allocated in heap and currently in use. Equals to /memory/classes/heap/objects:bytes.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("alloc_bytes_total"),
+ "Total number of bytes allocated in heap until now, even if released already. Equals to /gc/heap/allocs:bytes.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("sys_bytes"),
+ "Number of bytes obtained from system. Equals to /memory/classes/total:byte.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mallocs_total"),
+ // TODO(bwplotka): We could add go_memstats_heap_objects, probably useful for discovery. Let's gather more feedback, kind of a waste of bytes for everybody for compatibility reasons to keep both, and we can't really rename/remove useful metric.
+ "Total number of heap objects allocated, both live and gc-ed. Semantically a counter version for go_memstats_heap_objects gauge. Equals to /gc/heap/allocs:objects + /gc/heap/tiny/allocs:objects.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("frees_total"),
+ "Total number of heap objects frees. Equals to /gc/heap/frees:objects + /gc/heap/tiny/allocs:objects.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_alloc_bytes"),
+ "Number of heap bytes allocated and currently in use, same as go_memstats_alloc_bytes. Equals to /memory/classes/heap/objects:bytes.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_sys_bytes"),
+ "Number of heap bytes obtained from system. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes + /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_idle_bytes"),
+ "Number of heap bytes waiting to be used. Equals to /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_inuse_bytes"),
+ "Number of heap bytes that are in use. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_released_bytes"),
+ "Number of heap bytes released to OS. Equals to /memory/classes/heap/released:bytes.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_objects"),
+ "Number of currently allocated objects. Equals to /gc/heap/objects:objects.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("stack_inuse_bytes"),
+ "Number of bytes obtained from system for stack allocator in non-CGO environments. Equals to /memory/classes/heap/stacks:bytes.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("stack_sys_bytes"),
+ "Number of bytes obtained from system for stack allocator. Equals to /memory/classes/heap/stacks:bytes + /memory/classes/os-stacks:bytes.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mspan_inuse_bytes"),
+ "Number of bytes in use by mspan structures. Equals to /memory/classes/metadata/mspan/inuse:bytes.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mspan_sys_bytes"),
+ "Number of bytes used for mspan structures obtained from system. Equals to /memory/classes/metadata/mspan/inuse:bytes + /memory/classes/metadata/mspan/free:bytes.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mcache_inuse_bytes"),
+ "Number of bytes in use by mcache structures. Equals to /memory/classes/metadata/mcache/inuse:bytes.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mcache_sys_bytes"),
+ "Number of bytes used for mcache structures obtained from system. Equals to /memory/classes/metadata/mcache/inuse:bytes + /memory/classes/metadata/mcache/free:bytes.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("buck_hash_sys_bytes"),
+ "Number of bytes used by the profiling bucket hash table. Equals to /memory/classes/profiling/buckets:bytes.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("gc_sys_bytes"),
+ "Number of bytes used for garbage collection system metadata. Equals to /memory/classes/metadata/other:bytes.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("other_sys_bytes"),
+ "Number of bytes used for other system allocations. Equals to /memory/classes/other:bytes.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("next_gc_bytes"),
+ "Number of heap bytes when next garbage collection will take place. Equals to /gc/heap/goal:bytes.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) },
+ valType: GaugeValue,
+ },
+ }
+}
+
+type baseGoCollector struct {
goroutinesDesc *Desc
threadsDesc *Desc
gcDesc *Desc
+ gcLastTimeDesc *Desc
goInfoDesc *Desc
-
- // ms... are memstats related.
- msLast *runtime.MemStats // Previously collected memstats.
- msLastTimestamp time.Time
- msMtx sync.Mutex // Protects msLast and msLastTimestamp.
- msMetrics memStatsMetrics
- msRead func(*runtime.MemStats) // For mocking in tests.
- msMaxWait time.Duration // Wait time for fresh memstats.
- msMaxAge time.Duration // Maximum allowed age of old memstats.
}
-// NewGoCollector is the obsolete version of collectors.NewGoCollector.
-// See there for documentation.
-//
-// Deprecated: Use collectors.NewGoCollector instead.
-func NewGoCollector() Collector {
- return &goCollector{
+func newBaseGoCollector() baseGoCollector {
+ return baseGoCollector{
goroutinesDesc: NewDesc(
"go_goroutines",
"Number of goroutines that currently exist.",
@@ -52,248 +218,34 @@ func NewGoCollector() Collector {
nil, nil),
gcDesc: NewDesc(
"go_gc_duration_seconds",
- "A summary of the pause duration of garbage collection cycles.",
+ "A summary of the wall-time pause (stop-the-world) duration in garbage collection cycles.",
+ nil, nil),
+ gcLastTimeDesc: NewDesc(
+ "go_memstats_last_gc_time_seconds",
+ "Number of seconds since 1970 of last garbage collection.",
nil, nil),
goInfoDesc: NewDesc(
"go_info",
"Information about the Go environment.",
nil, Labels{"version": runtime.Version()}),
- msLast: &runtime.MemStats{},
- msRead: runtime.ReadMemStats,
- msMaxWait: time.Second,
- msMaxAge: 5 * time.Minute,
- msMetrics: memStatsMetrics{
- {
- desc: NewDesc(
- memstatNamespace("alloc_bytes"),
- "Number of bytes allocated and still in use.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("alloc_bytes_total"),
- "Total number of bytes allocated, even if freed.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) },
- valType: CounterValue,
- }, {
- desc: NewDesc(
- memstatNamespace("sys_bytes"),
- "Number of bytes obtained from system.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("lookups_total"),
- "Total number of pointer lookups.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) },
- valType: CounterValue,
- }, {
- desc: NewDesc(
- memstatNamespace("mallocs_total"),
- "Total number of mallocs.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) },
- valType: CounterValue,
- }, {
- desc: NewDesc(
- memstatNamespace("frees_total"),
- "Total number of frees.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) },
- valType: CounterValue,
- }, {
- desc: NewDesc(
- memstatNamespace("heap_alloc_bytes"),
- "Number of heap bytes allocated and still in use.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("heap_sys_bytes"),
- "Number of heap bytes obtained from system.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("heap_idle_bytes"),
- "Number of heap bytes waiting to be used.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("heap_inuse_bytes"),
- "Number of heap bytes that are in use.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("heap_released_bytes"),
- "Number of heap bytes released to OS.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("heap_objects"),
- "Number of allocated objects.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("stack_inuse_bytes"),
- "Number of bytes in use by the stack allocator.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("stack_sys_bytes"),
- "Number of bytes obtained from system for stack allocator.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("mspan_inuse_bytes"),
- "Number of bytes in use by mspan structures.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("mspan_sys_bytes"),
- "Number of bytes used for mspan structures obtained from system.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("mcache_inuse_bytes"),
- "Number of bytes in use by mcache structures.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("mcache_sys_bytes"),
- "Number of bytes used for mcache structures obtained from system.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("buck_hash_sys_bytes"),
- "Number of bytes used by the profiling bucket hash table.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("gc_sys_bytes"),
- "Number of bytes used for garbage collection system metadata.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("other_sys_bytes"),
- "Number of bytes used for other system allocations.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("next_gc_bytes"),
- "Number of heap bytes when next garbage collection will take place.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("last_gc_time_seconds"),
- "Number of seconds since 1970 of last garbage collection.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("gc_cpu_fraction"),
- "The fraction of this program's available CPU time used by the GC since the program started.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction },
- valType: GaugeValue,
- },
- },
}
}
-func memstatNamespace(s string) string {
- return "go_memstats_" + s
-}
-
// Describe returns all descriptions of the collector.
-func (c *goCollector) Describe(ch chan<- *Desc) {
+func (c *baseGoCollector) Describe(ch chan<- *Desc) {
ch <- c.goroutinesDesc
ch <- c.threadsDesc
ch <- c.gcDesc
+ ch <- c.gcLastTimeDesc
ch <- c.goInfoDesc
- for _, i := range c.msMetrics {
- ch <- i.desc
- }
}
// Collect returns the current state of all metrics of the collector.
-func (c *goCollector) Collect(ch chan<- Metric) {
- var (
- ms = &runtime.MemStats{}
- done = make(chan struct{})
- )
- // Start reading memstats first as it might take a while.
- go func() {
- c.msRead(ms)
- c.msMtx.Lock()
- c.msLast = ms
- c.msLastTimestamp = time.Now()
- c.msMtx.Unlock()
- close(done)
- }()
-
+func (c *baseGoCollector) Collect(ch chan<- Metric) {
ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine()))
- n, _ := runtime.ThreadCreateProfile(nil)
- ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n))
+
+ n := getRuntimeNumThreads()
+ ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, n)
var stats debug.GCStats
stats.PauseQuantiles = make([]time.Duration, 5)
@@ -305,63 +257,18 @@ func (c *goCollector) Collect(ch chan<- Metric) {
}
quantiles[0.0] = stats.PauseQuantiles[0].Seconds()
ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles)
-
+ ch <- MustNewConstMetric(c.gcLastTimeDesc, GaugeValue, float64(stats.LastGC.UnixNano())/1e9)
ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1)
-
- timer := time.NewTimer(c.msMaxWait)
- select {
- case <-done: // Our own ReadMemStats succeeded in time. Use it.
- timer.Stop() // Important for high collection frequencies to not pile up timers.
- c.msCollect(ch, ms)
- return
- case <-timer.C: // Time out, use last memstats if possible. Continue below.
- }
- c.msMtx.Lock()
- if time.Since(c.msLastTimestamp) < c.msMaxAge {
- // Last memstats are recent enough. Collect from them under the lock.
- c.msCollect(ch, c.msLast)
- c.msMtx.Unlock()
- return
- }
- // If we are here, the last memstats are too old or don't exist. We have
- // to wait until our own ReadMemStats finally completes. For that to
- // happen, we have to release the lock.
- c.msMtx.Unlock()
- <-done
- c.msCollect(ch, ms)
}
-func (c *goCollector) msCollect(ch chan<- Metric, ms *runtime.MemStats) {
- for _, i := range c.msMetrics {
- ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms))
- }
+func memstatNamespace(s string) string {
+ return "go_memstats_" + s
}
-// memStatsMetrics provide description, value, and value type for memstat metrics.
+// memStatsMetrics provide description, evaluator, and value type for
+// memstat metrics.
type memStatsMetrics []struct {
desc *Desc
eval func(*runtime.MemStats) float64
valType ValueType
}
-
-// NewBuildInfoCollector is the obsolete version of collectors.NewBuildInfoCollector.
-// See there for documentation.
-//
-// Deprecated: Use collectors.NewBuildInfoCollector instead.
-func NewBuildInfoCollector() Collector {
- path, version, sum := "unknown", "unknown", "unknown"
- if bi, ok := debug.ReadBuildInfo(); ok {
- path = bi.Main.Path
- version = bi.Main.Version
- sum = bi.Main.Sum
- }
- c := &selfCollector{MustNewConstMetric(
- NewDesc(
- "go_build_info",
- "Build information about the main Go module.",
- nil, Labels{"path": path, "version": version, "checksum": sum},
- ),
- GaugeValue, 1)}
- c.init(c.self)
- return c
-}
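
After this split, the deprecated prometheus.NewGoCollector (now defined per Go version in the new files below) registers exactly as before:

	reg := prometheus.NewRegistry()
	reg.MustRegister(prometheus.NewGoCollector()) // prefer collectors.NewGoCollector
	mfs, _ := reg.Gather()
	// mfs includes go_goroutines, go_threads, go_gc_duration_seconds, go_info,
	// go_memstats_last_gc_time_seconds, and the go_memstats_* series built
	// from goRuntimeMemStats above.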
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go
new file mode 100644
index 00000000000..897a6e906b3
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go
@@ -0,0 +1,122 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !go1.17
+// +build !go1.17
+
+package prometheus
+
+import (
+ "runtime"
+ "sync"
+ "time"
+)
+
+type goCollector struct {
+ base baseGoCollector
+
+ // ms... are memstats related.
+ msLast *runtime.MemStats // Previously collected memstats.
+ msLastTimestamp time.Time
+ msMtx sync.Mutex // Protects msLast and msLastTimestamp.
+ msMetrics memStatsMetrics
+ msRead func(*runtime.MemStats) // For mocking in tests.
+ msMaxWait time.Duration // Wait time for fresh memstats.
+ msMaxAge time.Duration // Maximum allowed age of old memstats.
+}
+
+// NewGoCollector is the obsolete version of collectors.NewGoCollector.
+// See there for documentation.
+//
+// Deprecated: Use collectors.NewGoCollector instead.
+func NewGoCollector() Collector {
+ msMetrics := goRuntimeMemStats()
+ msMetrics = append(msMetrics, struct {
+ desc *Desc
+ eval func(*runtime.MemStats) float64
+ valType ValueType
+ }{
+ // This metric is omitted in Go1.17+, see https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034
+ desc: NewDesc(
+ memstatNamespace("gc_cpu_fraction"),
+ "The fraction of this program's available CPU time used by the GC since the program started.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction },
+ valType: GaugeValue,
+ })
+ return &goCollector{
+ base: newBaseGoCollector(),
+ msLast: &runtime.MemStats{},
+ msRead: runtime.ReadMemStats,
+ msMaxWait: time.Second,
+ msMaxAge: 5 * time.Minute,
+ msMetrics: msMetrics,
+ }
+}
+
+// Describe returns all descriptions of the collector.
+func (c *goCollector) Describe(ch chan<- *Desc) {
+ c.base.Describe(ch)
+ for _, i := range c.msMetrics {
+ ch <- i.desc
+ }
+}
+
+// Collect returns the current state of all metrics of the collector.
+func (c *goCollector) Collect(ch chan<- Metric) {
+ var (
+ ms = &runtime.MemStats{}
+ done = make(chan struct{})
+ )
+ // Start reading memstats first as it might take a while.
+ go func() {
+ c.msRead(ms)
+ c.msMtx.Lock()
+ c.msLast = ms
+ c.msLastTimestamp = time.Now()
+ c.msMtx.Unlock()
+ close(done)
+ }()
+
+ // Collect base non-memory metrics.
+ c.base.Collect(ch)
+
+ timer := time.NewTimer(c.msMaxWait)
+ select {
+ case <-done: // Our own ReadMemStats succeeded in time. Use it.
+ timer.Stop() // Important for high collection frequencies to not pile up timers.
+ c.msCollect(ch, ms)
+ return
+ case <-timer.C: // Time out, use last memstats if possible. Continue below.
+ }
+ c.msMtx.Lock()
+ if time.Since(c.msLastTimestamp) < c.msMaxAge {
+ // Last memstats are recent enough. Collect from them under the lock.
+ c.msCollect(ch, c.msLast)
+ c.msMtx.Unlock()
+ return
+ }
+ // If we are here, the last memstats are too old or don't exist. We have
+ // to wait until our own ReadMemStats finally completes. For that to
+ // happen, we have to release the lock.
+ c.msMtx.Unlock()
+ <-done
+ c.msCollect(ch, ms)
+}
+
+func (c *goCollector) msCollect(ch chan<- Metric, ms *runtime.MemStats) {
+ for _, i := range c.msMetrics {
+ ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms))
+ }
+}
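
Collect above is the pre-Go-1.17 stale-cache-with-timeout pattern: start ReadMemStats in a goroutine, serve cached stats if it misses the msMaxWait deadline, and block only when the cache is older than msMaxAge. Reduced to a generic skeleton (a sketch, not part of the patch):

	// readWithFallback reports whether a fresh read completed in time; on
	// false the caller uses its cached snapshot instead.
	func readWithFallback(read func(), maxWait time.Duration, cacheFresh func() bool) bool {
		done := make(chan struct{})
		go func() { read(); close(done) }()
		t := time.NewTimer(maxWait)
		defer t.Stop()
		select {
		case <-done:
			return true
		case <-t.C:
		}
		if cacheFresh() {
			return false
		}
		<-done // cache too old: wait for the in-flight read after all
		return true
	}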
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go
new file mode 100644
index 00000000000..51174641729
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go
@@ -0,0 +1,574 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build go1.17
+// +build go1.17
+
+package prometheus
+
+import (
+ "fmt"
+ "math"
+ "runtime"
+ "runtime/metrics"
+ "strings"
+ "sync"
+
+ "github.com/prometheus/client_golang/prometheus/internal"
+
+ dto "github.com/prometheus/client_model/go"
+ "google.golang.org/protobuf/proto"
+)
+
+const (
+ // constants for strings referenced more than once.
+ goGCHeapTinyAllocsObjects = "/gc/heap/tiny/allocs:objects"
+ goGCHeapAllocsObjects = "/gc/heap/allocs:objects"
+ goGCHeapFreesObjects = "/gc/heap/frees:objects"
+ goGCHeapFreesBytes = "/gc/heap/frees:bytes"
+ goGCHeapAllocsBytes = "/gc/heap/allocs:bytes"
+ goGCHeapObjects = "/gc/heap/objects:objects"
+ goGCHeapGoalBytes = "/gc/heap/goal:bytes"
+ goMemoryClassesTotalBytes = "/memory/classes/total:bytes"
+ goMemoryClassesHeapObjectsBytes = "/memory/classes/heap/objects:bytes"
+ goMemoryClassesHeapUnusedBytes = "/memory/classes/heap/unused:bytes"
+ goMemoryClassesHeapReleasedBytes = "/memory/classes/heap/released:bytes"
+ goMemoryClassesHeapFreeBytes = "/memory/classes/heap/free:bytes"
+ goMemoryClassesHeapStacksBytes = "/memory/classes/heap/stacks:bytes"
+ goMemoryClassesOSStacksBytes = "/memory/classes/os-stacks:bytes"
+ goMemoryClassesMetadataMSpanInuseBytes = "/memory/classes/metadata/mspan/inuse:bytes"
+ goMemoryClassesMetadataMSPanFreeBytes = "/memory/classes/metadata/mspan/free:bytes"
+ goMemoryClassesMetadataMCacheInuseBytes = "/memory/classes/metadata/mcache/inuse:bytes"
+ goMemoryClassesMetadataMCacheFreeBytes = "/memory/classes/metadata/mcache/free:bytes"
+ goMemoryClassesProfilingBucketsBytes = "/memory/classes/profiling/buckets:bytes"
+ goMemoryClassesMetadataOtherBytes = "/memory/classes/metadata/other:bytes"
+ goMemoryClassesOtherBytes = "/memory/classes/other:bytes"
+)
+
+// rmNamesForMemStatsMetrics lists the runtime/metrics names required to populate the goRuntimeMemStats-equivalent metrics.
+var rmNamesForMemStatsMetrics = []string{
+ goGCHeapTinyAllocsObjects,
+ goGCHeapAllocsObjects,
+ goGCHeapFreesObjects,
+ goGCHeapAllocsBytes,
+ goGCHeapObjects,
+ goGCHeapGoalBytes,
+ goMemoryClassesTotalBytes,
+ goMemoryClassesHeapObjectsBytes,
+ goMemoryClassesHeapUnusedBytes,
+ goMemoryClassesHeapReleasedBytes,
+ goMemoryClassesHeapFreeBytes,
+ goMemoryClassesHeapStacksBytes,
+ goMemoryClassesOSStacksBytes,
+ goMemoryClassesMetadataMSpanInuseBytes,
+ goMemoryClassesMetadataMSPanFreeBytes,
+ goMemoryClassesMetadataMCacheInuseBytes,
+ goMemoryClassesMetadataMCacheFreeBytes,
+ goMemoryClassesProfilingBucketsBytes,
+ goMemoryClassesMetadataOtherBytes,
+ goMemoryClassesOtherBytes,
+}
+
+func bestEffortLookupRM(lookup []string) []metrics.Description {
+ ret := make([]metrics.Description, 0, len(lookup))
+ for _, rm := range metrics.All() {
+ for _, m := range lookup {
+ if m == rm.Name {
+ ret = append(ret, rm)
+ }
+ }
+ }
+ return ret
+}
+
+type goCollector struct {
+ base baseGoCollector
+
+ // mu protects updates to all fields ensuring a consistent
+ // snapshot is always produced by Collect.
+ mu sync.Mutex
+
+ // Contains all samples that have to be retrieved from runtime/metrics (not all of them will be exposed).
+ sampleBuf []metrics.Sample
+ // sampleMap allows lookup for MemStats metrics and runtime/metrics histograms for exact sums.
+ sampleMap map[string]*metrics.Sample
+
+ // rmExposedMetrics represents all runtime/metrics package metrics
+ // that were configured to be exposed.
+ rmExposedMetrics []collectorMetric
+ rmExactSumMapForHist map[string]string
+
+ // With Go 1.17, the runtime/metrics package was introduced.
+ // From that point on, Prometheus metric names could be generated
+ // mechanically from runtime/metrics names. However, these differ
+ // from the old names for the same values.
+ //
+ // This field exists to export the same values under the old names
+ // as well.
+ msMetrics memStatsMetrics
+ msMetricsEnabled bool
+}
+
+type rmMetricDesc struct {
+ metrics.Description
+}
+
+func matchRuntimeMetricsRules(rules []internal.GoCollectorRule) []rmMetricDesc {
+ var descs []rmMetricDesc
+ for _, d := range metrics.All() {
+ var (
+ deny = true
+ desc rmMetricDesc
+ )
+
+ for _, r := range rules {
+ if !r.Matcher.MatchString(d.Name) {
+ continue
+ }
+ deny = r.Deny
+ }
+ if deny {
+ continue
+ }
+
+ desc.Description = d
+ descs = append(descs, desc)
+ }
+ return descs
+}
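
Rule evaluation here is deny-by-default and last-match-wins: a metric is exposed only if the final rule whose Matcher hits it has Deny == false. The public way to feed rules in is through the collectors package wrappers around these options; a hedged sketch (the wrapper names are assumptions, as they live outside this patch):

	reg.MustRegister(collectors.NewGoCollector(
		collectors.WithGoCollectorRuntimeMetrics(
			// Expose the scheduler latency histogram on top of the defaults.
			collectors.GoRuntimeMetricsRule{
				Matcher: regexp.MustCompile(`^/sched/latencies:seconds$`),
			},
		),
	))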
+
+func defaultGoCollectorOptions() internal.GoCollectorOptions {
+ return internal.GoCollectorOptions{
+ RuntimeMetricSumForHist: map[string]string{
+ "/gc/heap/allocs-by-size:bytes": goGCHeapAllocsBytes,
+ "/gc/heap/frees-by-size:bytes": goGCHeapFreesBytes,
+ },
+ RuntimeMetricRules: []internal.GoCollectorRule{
+ // Recommended metrics we want by default from runtime/metrics.
+ {Matcher: internal.GoCollectorDefaultRuntimeMetrics},
+ },
+ }
+}
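+
+// A hedged usage sketch (illustrative only): NewGoCollector below applies
+// each option function to these defaults, so a caller with access to the
+// internal options could, for example, disable the MemStats-like metrics:
+//
+//	c := NewGoCollector(func(o *internal.GoCollectorOptions) {
+//		o.DisableMemStatsLikeMetrics = true
+//	})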
+
+// NewGoCollector is the obsolete version of collectors.NewGoCollector.
+// See there for documentation.
+//
+// Deprecated: Use collectors.NewGoCollector instead.
+func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector {
+ opt := defaultGoCollectorOptions()
+ for _, o := range opts {
+ o(&opt)
+ }
+
+ exposedDescriptions := matchRuntimeMetricsRules(opt.RuntimeMetricRules)
+
+ // Collect all histogram samples so that we can get their buckets.
+ // The API guarantees that the buckets are always fixed for the lifetime
+ // of the process.
+ var histograms []metrics.Sample
+ for _, d := range exposedDescriptions {
+ if d.Kind == metrics.KindFloat64Histogram {
+ histograms = append(histograms, metrics.Sample{Name: d.Name})
+ }
+ }
+
+ if len(histograms) > 0 {
+ metrics.Read(histograms)
+ }
+
+ bucketsMap := make(map[string][]float64)
+ for i := range histograms {
+ bucketsMap[histograms[i].Name] = histograms[i].Value.Float64Histogram().Buckets
+ }
+
+ // Generate a collector for each exposed runtime/metrics metric.
+ metricSet := make([]collectorMetric, 0, len(exposedDescriptions))
+ // sampleBuf is used for reading from runtime/metrics.
+ // We size it for the largest possible case up front so the slice never
+ // reallocates, which keeps the pointers stored in sampleMap stable.
+ sampleBuf := make([]metrics.Sample, 0, len(exposedDescriptions)+len(opt.RuntimeMetricSumForHist)+len(rmNamesForMemStatsMetrics))
+ sampleMap := make(map[string]*metrics.Sample, len(exposedDescriptions))
+ for _, d := range exposedDescriptions {
+ namespace, subsystem, name, ok := internal.RuntimeMetricsToProm(&d.Description)
+ if !ok {
+ // Just ignore this metric; we can't do anything with it here.
+ // If a user decides to use the latest version of Go, we don't want
+ // to fail here. This condition is tested in TestExpectedRuntimeMetrics.
+ continue
+ }
+ help := attachOriginalName(d.Description.Description, d.Name)
+
+ sampleBuf = append(sampleBuf, metrics.Sample{Name: d.Name})
+ sampleMap[d.Name] = &sampleBuf[len(sampleBuf)-1]
+
+ var m collectorMetric
+ if d.Kind == metrics.KindFloat64Histogram {
+ _, hasSum := opt.RuntimeMetricSumForHist[d.Name]
+ unit := d.Name[strings.IndexRune(d.Name, ':')+1:]
+ m = newBatchHistogram(
+ NewDesc(
+ BuildFQName(namespace, subsystem, name),
+ help,
+ nil,
+ nil,
+ ),
+ internal.RuntimeMetricsBucketsForUnit(bucketsMap[d.Name], unit),
+ hasSum,
+ )
+ } else if d.Cumulative {
+ m = NewCounter(CounterOpts{
+ Namespace: namespace,
+ Subsystem: subsystem,
+ Name: name,
+ Help: help,
+ },
+ )
+ } else {
+ m = NewGauge(GaugeOpts{
+ Namespace: namespace,
+ Subsystem: subsystem,
+ Name: name,
+ Help: help,
+ })
+ }
+ metricSet = append(metricSet, m)
+ }
+
+ // Add exact sum metrics to sampleBuf if not added before.
+ for _, h := range histograms {
+ sumMetric, ok := opt.RuntimeMetricSumForHist[h.Name]
+ if !ok {
+ continue
+ }
+
+ if _, ok := sampleMap[sumMetric]; ok {
+ continue
+ }
+ sampleBuf = append(sampleBuf, metrics.Sample{Name: sumMetric})
+ sampleMap[sumMetric] = &sampleBuf[len(sampleBuf)-1]
+ }
+
+ var (
+ msMetrics memStatsMetrics
+ msDescriptions []metrics.Description
+ )
+
+ if !opt.DisableMemStatsLikeMetrics {
+ msMetrics = goRuntimeMemStats()
+ msDescriptions = bestEffortLookupRM(rmNamesForMemStatsMetrics)
+
+ // If a metric was not already exposed above, add it to sampleBuf.
+ for _, mdDesc := range msDescriptions {
+ if _, ok := sampleMap[mdDesc.Name]; ok {
+ continue
+ }
+ sampleBuf = append(sampleBuf, metrics.Sample{Name: mdDesc.Name})
+ sampleMap[mdDesc.Name] = &sampleBuf[len(sampleBuf)-1]
+ }
+ }
+
+ return &goCollector{
+ base: newBaseGoCollector(),
+ sampleBuf: sampleBuf,
+ sampleMap: sampleMap,
+ rmExposedMetrics: metricSet,
+ rmExactSumMapForHist: opt.RuntimeMetricSumForHist,
+ msMetrics: msMetrics,
+ msMetricsEnabled: !opt.DisableMemStatsLikeMetrics,
+ }
+}
+
+func attachOriginalName(desc, origName string) string {
+ return fmt.Sprintf("%s Sourced from %s", desc, origName)
+}
+
+// Describe returns all descriptions of the collector.
+func (c *goCollector) Describe(ch chan<- *Desc) {
+ c.base.Describe(ch)
+ for _, i := range c.msMetrics {
+ ch <- i.desc
+ }
+ for _, m := range c.rmExposedMetrics {
+ ch <- m.Desc()
+ }
+}
+
+// Collect returns the current state of all metrics of the collector.
+func (c *goCollector) Collect(ch chan<- Metric) {
+ // Collect base non-memory metrics.
+ c.base.Collect(ch)
+
+ if len(c.sampleBuf) == 0 {
+ return
+ }
+
+ // Collect must be thread-safe, so prevent concurrent use of
+ // sampleBuf elements. Just read into sampleBuf but write all the data
+ // we get into our Metrics or MemStats.
+ //
+ // This lock also ensures that the Metrics we send out are all from
+ // the same updates, ensuring their mutual consistency insofar as
+ // is guaranteed by the runtime/metrics package.
+ //
+ // N.B. This locking is heavy-handed, but Collect is expected to be called
+ // relatively infrequently. Also the core operation here, metrics.Read,
+ // is fast (O(tens of microseconds)) so contention should certainly be
+ // low, though channel operations and any allocations may add to that.
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ // Populate runtime/metrics sample buffer.
+ metrics.Read(c.sampleBuf)
+
+ // Collect all the runtime/metrics the user chose to expose from sampleBuf (if any).
+ for i, metric := range c.rmExposedMetrics {
+ // We created samples for exposed metrics first in order, so indexes match.
+ sample := c.sampleBuf[i]
+
+ // N.B. switch on concrete type because it's significantly more efficient
+ // than checking for the Counter and Gauge interface implementations. In
+ // this case, we control all the types here.
+ switch m := metric.(type) {
+ case *counter:
+ // Guard against decreases. This should never happen, but a failure
+ // to do so will result in a panic, which is a harsh consequence for
+ // a metrics collection bug.
+ v0, v1 := m.get(), unwrapScalarRMValue(sample.Value)
+ if v1 > v0 {
+ m.Add(v1 - v0)
+ }
+ m.Collect(ch)
+ case *gauge:
+ m.Set(unwrapScalarRMValue(sample.Value))
+ m.Collect(ch)
+ case *batchHistogram:
+ m.update(sample.Value.Float64Histogram(), c.exactSumFor(sample.Name))
+ m.Collect(ch)
+ default:
+ panic("unexpected metric type")
+ }
+ }
+
+ if c.msMetricsEnabled {
+ // ms is a dummy MemStats that we populate ourselves so that we can
+ // populate the old metrics from it when msMetricsEnabled is set.
+ var ms runtime.MemStats
+ memStatsFromRM(&ms, c.sampleMap)
+ for _, i := range c.msMetrics {
+ ch <- MustNewConstMetric(i.desc, i.valType, i.eval(&ms))
+ }
+ }
+}
+
+// unwrapScalarRMValue unwraps a runtime/metrics value that is assumed
+// to be scalar and returns the equivalent float64 value. Panics if the
+// value is not scalar.
+func unwrapScalarRMValue(v metrics.Value) float64 {
+ switch v.Kind() {
+ case metrics.KindUint64:
+ return float64(v.Uint64())
+ case metrics.KindFloat64:
+ return v.Float64()
+ case metrics.KindBad:
+ // Unsupported metric.
+ //
+ // This should never happen because we always populate our metric
+ // set from the runtime/metrics package.
+ panic("unexpected bad kind metric")
+ default:
+ // Unsupported metric kind.
+ //
+ // This should never happen because we check for this during initialization
+ // and flag and filter metrics whose kinds we don't understand.
+ panic(fmt.Sprintf("unexpected unsupported metric: %v", v.Kind()))
+ }
+}
+
+// exactSumFor takes a runtime/metrics metric name (that is assumed to
+// be of kind KindFloat64Histogram) and returns its exact sum, or 0 if no
+// exact sum is available for it.
+//
+// The runtime/metrics API for histograms doesn't currently expose exact
+// sums, but some of the other metrics are in fact exact sums of histograms.
+func (c *goCollector) exactSumFor(rmName string) float64 {
+ sumName, ok := c.rmExactSumMapForHist[rmName]
+ if !ok {
+ return 0
+ }
+ s, ok := c.sampleMap[sumName]
+ if !ok {
+ return 0
+ }
+ return unwrapScalarRMValue(s.Value)
+}
+
+func memStatsFromRM(ms *runtime.MemStats, rm map[string]*metrics.Sample) {
+ lookupOrZero := func(name string) uint64 {
+ if s, ok := rm[name]; ok {
+ return s.Value.Uint64()
+ }
+ return 0
+ }
+
+ // Currently, MemStats adds tiny alloc count to both Mallocs AND Frees.
+ // The reason for this is that MemStats couldn't be extended at the time,
+ // but there was a desire to have Mallocs at least be a little more representative,
+ // while having Mallocs - Frees still represent a live object count.
+ // Unfortunately, MemStats doesn't actually export a large allocation count,
+ // so it's impossible to pull this number out directly.
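+ // As a worked example (illustrative numbers only): with 100 heap
+ // allocations, 90 frees, and 5 tiny allocations, the lines below yield
+ // Mallocs = 105 and Frees = 95, so Mallocs - Frees = 10 live objects.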
+ tinyAllocs := lookupOrZero(goGCHeapTinyAllocsObjects)
+ ms.Mallocs = lookupOrZero(goGCHeapAllocsObjects) + tinyAllocs
+ ms.Frees = lookupOrZero(goGCHeapFreesObjects) + tinyAllocs
+
+ ms.TotalAlloc = lookupOrZero(goGCHeapAllocsBytes)
+ ms.Sys = lookupOrZero(goMemoryClassesTotalBytes)
+ ms.Lookups = 0 // Already always zero.
+ ms.HeapAlloc = lookupOrZero(goMemoryClassesHeapObjectsBytes)
+ ms.Alloc = ms.HeapAlloc
+ ms.HeapInuse = ms.HeapAlloc + lookupOrZero(goMemoryClassesHeapUnusedBytes)
+ ms.HeapReleased = lookupOrZero(goMemoryClassesHeapReleasedBytes)
+ ms.HeapIdle = ms.HeapReleased + lookupOrZero(goMemoryClassesHeapFreeBytes)
+ ms.HeapSys = ms.HeapInuse + ms.HeapIdle
+ ms.HeapObjects = lookupOrZero(goGCHeapObjects)
+ ms.StackInuse = lookupOrZero(goMemoryClassesHeapStacksBytes)
+ ms.StackSys = ms.StackInuse + lookupOrZero(goMemoryClassesOSStacksBytes)
+ ms.MSpanInuse = lookupOrZero(goMemoryClassesMetadataMSpanInuseBytes)
+ ms.MSpanSys = ms.MSpanInuse + lookupOrZero(goMemoryClassesMetadataMSPanFreeBytes)
+ ms.MCacheInuse = lookupOrZero(goMemoryClassesMetadataMCacheInuseBytes)
+ ms.MCacheSys = ms.MCacheInuse + lookupOrZero(goMemoryClassesMetadataMCacheFreeBytes)
+ ms.BuckHashSys = lookupOrZero(goMemoryClassesProfilingBucketsBytes)
+ ms.GCSys = lookupOrZero(goMemoryClassesMetadataOtherBytes)
+ ms.OtherSys = lookupOrZero(goMemoryClassesOtherBytes)
+ ms.NextGC = lookupOrZero(goGCHeapGoalBytes)
+
+ // N.B. GCCPUFraction is intentionally omitted. This metric is not useful,
+ // and often misleading due to the fact that it's an average over the lifetime
+ // of the process.
+ // See https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034
+ // for more details.
+ ms.GCCPUFraction = 0
+}
+
+// batchHistogram is a mutable histogram that is updated
+// in batches.
+type batchHistogram struct {
+ selfCollector
+
+ // Static fields updated only once.
+ desc *Desc
+ hasSum bool
+
+ // Because this histogram operates in batches, it just uses a
+ // single mutex for everything. Updates are always serialized,
+ // but Write calls may operate concurrently with updates.
+ // Contention between these two sources should be rare.
+ mu sync.Mutex
+ buckets []float64 // Inclusive lower bounds, like runtime/metrics.
+ counts []uint64
+ sum float64 // Used if hasSum is true.
+}
+
+// newBatchHistogram creates a new batch histogram value with the given
+// Desc, buckets, and whether or not it has an exact sum available.
+//
+// buckets must always be from the runtime/metrics package, following
+// the same conventions.
+func newBatchHistogram(desc *Desc, buckets []float64, hasSum bool) *batchHistogram {
+ // We need to remove -Inf values: runtime/metrics keeps them around,
+ // but a -Inf bucket is not allowed for Prometheus histograms.
+ if buckets[0] == math.Inf(-1) {
+ buckets = buckets[1:]
+ }
+ h := &batchHistogram{
+ desc: desc,
+ buckets: buckets,
+ // Because buckets follows runtime/metrics conventions, there's
+ // 1 more value in the buckets list than there are buckets represented,
+ // because in runtime/metrics, the bucket values represent *boundaries*,
+ // and non-Inf boundaries are inclusive lower bounds for that bucket.
+ counts: make([]uint64, len(buckets)-1),
+ hasSum: hasSum,
+ }
+ h.init(h)
+ return h
+}
+
+// update updates the batchHistogram from a runtime/metrics histogram.
+//
+// sum must be provided if the batchHistogram was created to have an exact sum.
+// h.buckets must be a strict subset of his.Buckets.
+func (h *batchHistogram) update(his *metrics.Float64Histogram, sum float64) {
+ counts, buckets := his.Counts, his.Buckets
+
+ h.mu.Lock()
+ defer h.mu.Unlock()
+
+ // Clear buckets.
+ for i := range h.counts {
+ h.counts[i] = 0
+ }
+ // Copy and reduce buckets.
+ var j int
+ for i, count := range counts {
+ h.counts[j] += count
+ if buckets[i+1] == h.buckets[j+1] {
+ j++
+ }
+ }
+ if h.hasSum {
+ h.sum = sum
+ }
+}
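+
+// A worked sketch of the reduction above (illustrative values): with
+// his.Buckets = [1, 2, 4, 8] and h.buckets = [1, 4, 8], the counts of the
+// runtime/metrics buckets [1,2) and [2,4) both accumulate into h.counts[0]
+// (the [1,4) bucket), because j only advances once a boundary in his.Buckets
+// coincides with the next boundary in h.buckets.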
+
+func (h *batchHistogram) Desc() *Desc {
+ return h.desc
+}
+
+func (h *batchHistogram) Write(out *dto.Metric) error {
+ h.mu.Lock()
+ defer h.mu.Unlock()
+
+ sum := float64(0)
+ if h.hasSum {
+ sum = h.sum
+ }
+ dtoBuckets := make([]*dto.Bucket, 0, len(h.counts))
+ totalCount := uint64(0)
+ for i, count := range h.counts {
+ totalCount += count
+ if !h.hasSum {
+ if count != 0 {
+ // N.B. This computed sum is an underestimate.
+ sum += h.buckets[i] * float64(count)
+ }
+ }
+
+ // Skip the +Inf bucket, but only for the bucket list.
+ // It must still count for sum and totalCount.
+ if math.IsInf(h.buckets[i+1], 1) {
+ break
+ }
+ // Float64Histogram's upper bound is exclusive, so make it inclusive
+ // by obtaining the next float64 value down, in order.
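+ // For instance (illustrative): with an exclusive bound of 2.0,
+ // math.Nextafter(2.0, 1.0) yields the largest float64 strictly below
+ // 2.0, which then serves as the inclusive "le" bound.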
+ upperBound := math.Nextafter(h.buckets[i+1], h.buckets[i])
+ dtoBuckets = append(dtoBuckets, &dto.Bucket{
+ CumulativeCount: proto.Uint64(totalCount),
+ UpperBound: proto.Float64(upperBound),
+ })
+ }
+ out.Histogram = &dto.Histogram{
+ Bucket: dtoBuckets,
+ SampleCount: proto.Uint64(totalCount),
+ SampleSum: proto.Float64(sum),
+ }
+ return nil
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
index 8425640b390..519db348a74 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
@@ -22,25 +22,222 @@ import (
"sync/atomic"
"time"
- //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
- "github.com/golang/protobuf/proto"
-
dto "github.com/prometheus/client_model/go"
+
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/types/known/timestamppb"
)
+// nativeHistogramBounds for the frac of observed values. Only relevant for
+// schema > 0. The position in the slice is the schema. (0 is never used, just
+// here for convenience of using the schema directly as the index.)
+//
+// TODO(beorn7): Currently, we do a binary search into these slices. There are
+// ways to turn it into a small number of simple array lookups. It probably only
+// matters for schema 5 and beyond, but should be investigated. See this comment
+// as a starting point:
+// https://github.com/open-telemetry/opentelemetry-specification/issues/1776#issuecomment-870164310
+var nativeHistogramBounds = [][]float64{
+ // Schema "0":
+ {0.5},
+ // Schema 1:
+ {0.5, 0.7071067811865475},
+ // Schema 2:
+ {0.5, 0.5946035575013605, 0.7071067811865475, 0.8408964152537144},
+ // Schema 3:
+ {
+ 0.5, 0.5452538663326288, 0.5946035575013605, 0.6484197773255048,
+ 0.7071067811865475, 0.7711054127039704, 0.8408964152537144, 0.9170040432046711,
+ },
+ // Schema 4:
+ {
+ 0.5, 0.5221368912137069, 0.5452538663326288, 0.5693943173783458,
+ 0.5946035575013605, 0.620928906036742, 0.6484197773255048, 0.6771277734684463,
+ 0.7071067811865475, 0.7384130729697496, 0.7711054127039704, 0.805245165974627,
+ 0.8408964152537144, 0.8781260801866495, 0.9170040432046711, 0.9576032806985735,
+ },
+ // Schema 5:
+ {
+ 0.5, 0.5109485743270583, 0.5221368912137069, 0.5335702003384117,
+ 0.5452538663326288, 0.5571933712979462, 0.5693943173783458, 0.5818624293887887,
+ 0.5946035575013605, 0.6076236799902344, 0.620928906036742, 0.6345254785958666,
+ 0.6484197773255048, 0.6626183215798706, 0.6771277734684463, 0.6919549409819159,
+ 0.7071067811865475, 0.7225904034885232, 0.7384130729697496, 0.7545822137967112,
+ 0.7711054127039704, 0.7879904225539431, 0.805245165974627, 0.8228777390769823,
+ 0.8408964152537144, 0.8593096490612387, 0.8781260801866495, 0.8973545375015533,
+ 0.9170040432046711, 0.9370838170551498, 0.9576032806985735, 0.9785720620876999,
+ },
+ // Schema 6:
+ {
+ 0.5, 0.5054446430258502, 0.5109485743270583, 0.5165124395106142,
+ 0.5221368912137069, 0.5278225891802786, 0.5335702003384117, 0.5393803988785598,
+ 0.5452538663326288, 0.5511912916539204, 0.5571933712979462, 0.5632608093041209,
+ 0.5693943173783458, 0.5755946149764913, 0.5818624293887887, 0.5881984958251406,
+ 0.5946035575013605, 0.6010783657263515, 0.6076236799902344, 0.6142402680534349,
+ 0.620928906036742, 0.6276903785123455, 0.6345254785958666, 0.6414350080393891,
+ 0.6484197773255048, 0.6554806057623822, 0.6626183215798706, 0.6698337620266515,
+ 0.6771277734684463, 0.6845012114872953, 0.6919549409819159, 0.6994898362691555,
+ 0.7071067811865475, 0.7148066691959849, 0.7225904034885232, 0.7304588970903234,
+ 0.7384130729697496, 0.7464538641456323, 0.7545822137967112, 0.762799075372269,
+ 0.7711054127039704, 0.7795022001189185, 0.7879904225539431, 0.7965710756711334,
+ 0.805245165974627, 0.8140137109286738, 0.8228777390769823, 0.8318382901633681,
+ 0.8408964152537144, 0.8500531768592616, 0.8593096490612387, 0.8686669176368529,
+ 0.8781260801866495, 0.8876882462632604, 0.8973545375015533, 0.9071260877501991,
+ 0.9170040432046711, 0.9269895625416926, 0.9370838170551498, 0.9472879907934827,
+ 0.9576032806985735, 0.9680308967461471, 0.9785720620876999, 0.9892280131939752,
+ },
+ // Schema 7:
+ {
+ 0.5, 0.5027149505564014, 0.5054446430258502, 0.5081891574554764,
+ 0.5109485743270583, 0.5137229745593818, 0.5165124395106142, 0.5193170509806894,
+ 0.5221368912137069, 0.5249720429003435, 0.5278225891802786, 0.5306886136446309,
+ 0.5335702003384117, 0.5364674337629877, 0.5393803988785598, 0.5423091811066545,
+ 0.5452538663326288, 0.5482145409081883, 0.5511912916539204, 0.5541842058618393,
+ 0.5571933712979462, 0.5602188762048033, 0.5632608093041209, 0.5663192597993595,
+ 0.5693943173783458, 0.572486072215902, 0.5755946149764913, 0.5787200368168754,
+ 0.5818624293887887, 0.585021884841625, 0.5881984958251406, 0.5913923554921704,
+ 0.5946035575013605, 0.5978321960199137, 0.6010783657263515, 0.6043421618132907,
+ 0.6076236799902344, 0.6109230164863786, 0.6142402680534349, 0.6175755319684665,
+ 0.620928906036742, 0.6243004885946023, 0.6276903785123455, 0.6310986751971253,
+ 0.6345254785958666, 0.637970889198196, 0.6414350080393891, 0.6449179367033329,
+ 0.6484197773255048, 0.6519406325959679, 0.6554806057623822, 0.659039800633032,
+ 0.6626183215798706, 0.6662162735415805, 0.6698337620266515, 0.6734708931164728,
+ 0.6771277734684463, 0.6808045103191123, 0.6845012114872953, 0.688217985377265,
+ 0.6919549409819159, 0.6957121878859629, 0.6994898362691555, 0.7032879969095076,
+ 0.7071067811865475, 0.7109463010845827, 0.7148066691959849, 0.718687998724491,
+ 0.7225904034885232, 0.7265139979245261, 0.7304588970903234, 0.7344252166684908,
+ 0.7384130729697496, 0.7424225829363761, 0.7464538641456323, 0.7505070348132126,
+ 0.7545822137967112, 0.7586795205991071, 0.762799075372269, 0.7669409989204777,
+ 0.7711054127039704, 0.7752924388424999, 0.7795022001189185, 0.7837348199827764,
+ 0.7879904225539431, 0.7922691326262467, 0.7965710756711334, 0.8008963778413465,
+ 0.805245165974627, 0.8096175675974316, 0.8140137109286738, 0.8184337248834821,
+ 0.8228777390769823, 0.8273458838280969, 0.8318382901633681, 0.8363550898207981,
+ 0.8408964152537144, 0.8454623996346523, 0.8500531768592616, 0.8546688815502312,
+ 0.8593096490612387, 0.8639756154809185, 0.8686669176368529, 0.8733836930995842,
+ 0.8781260801866495, 0.8828942179666361, 0.8876882462632604, 0.8925083056594671,
+ 0.8973545375015533, 0.9022270839033115, 0.9071260877501991, 0.9120516927035263,
+ 0.9170040432046711, 0.9219832844793128, 0.9269895625416926, 0.9320230241988943,
+ 0.9370838170551498, 0.9421720895161669, 0.9472879907934827, 0.9524316709088368,
+ 0.9576032806985735, 0.9628029718180622, 0.9680308967461471, 0.9732872087896164,
+ 0.9785720620876999, 0.9838856116165875, 0.9892280131939752, 0.9945994234836328,
+ },
+ // Schema 8:
+ {
+ 0.5, 0.5013556375251013, 0.5027149505564014, 0.5040779490592088,
+ 0.5054446430258502, 0.5068150424757447, 0.5081891574554764, 0.509566998038869,
+ 0.5109485743270583, 0.5123338964485679, 0.5137229745593818, 0.5151158188430205,
+ 0.5165124395106142, 0.5179128468009786, 0.5193170509806894, 0.520725062344158,
+ 0.5221368912137069, 0.5235525479396449, 0.5249720429003435, 0.526395386502313,
+ 0.5278225891802786, 0.5292536613972564, 0.5306886136446309, 0.5321274564422321,
+ 0.5335702003384117, 0.5350168559101208, 0.5364674337629877, 0.5379219445313954,
+ 0.5393803988785598, 0.5408428074966075, 0.5423091811066545, 0.5437795304588847,
+ 0.5452538663326288, 0.5467321995364429, 0.5482145409081883, 0.549700901315111,
+ 0.5511912916539204, 0.5526857228508706, 0.5541842058618393, 0.5556867516724088,
+ 0.5571933712979462, 0.5587040757836845, 0.5602188762048033, 0.5617377836665098,
+ 0.5632608093041209, 0.564787964283144, 0.5663192597993595, 0.5678547070789026,
+ 0.5693943173783458, 0.5709381019847808, 0.572486072215902, 0.5740382394200894,
+ 0.5755946149764913, 0.5771552102951081, 0.5787200368168754, 0.5802891060137493,
+ 0.5818624293887887, 0.5834400184762408, 0.585021884841625, 0.5866080400818185,
+ 0.5881984958251406, 0.5897932637314379, 0.5913923554921704, 0.5929957828304968,
+ 0.5946035575013605, 0.5962156912915756, 0.5978321960199137, 0.5994530835371903,
+ 0.6010783657263515, 0.6027080545025619, 0.6043421618132907, 0.6059806996384005,
+ 0.6076236799902344, 0.6092711149137041, 0.6109230164863786, 0.6125793968185725,
+ 0.6142402680534349, 0.6159056423670379, 0.6175755319684665, 0.6192499490999082,
+ 0.620928906036742, 0.622612415087629, 0.6243004885946023, 0.6259931389331581,
+ 0.6276903785123455, 0.6293922197748583, 0.6310986751971253, 0.6328097572894031,
+ 0.6345254785958666, 0.6362458516947014, 0.637970889198196, 0.6397006037528346,
+ 0.6414350080393891, 0.6431741147730128, 0.6449179367033329, 0.6466664866145447,
+ 0.6484197773255048, 0.6501778216898253, 0.6519406325959679, 0.6537082229673385,
+ 0.6554806057623822, 0.6572577939746774, 0.659039800633032, 0.6608266388015788,
+ 0.6626183215798706, 0.6644148621029772, 0.6662162735415805, 0.6680225691020727,
+ 0.6698337620266515, 0.6716498655934177, 0.6734708931164728, 0.6752968579460171,
+ 0.6771277734684463, 0.6789636531064505, 0.6808045103191123, 0.6826503586020058,
+ 0.6845012114872953, 0.6863570825438342, 0.688217985377265, 0.690083933630119,
+ 0.6919549409819159, 0.6938310211492645, 0.6957121878859629, 0.6975984549830999,
+ 0.6994898362691555, 0.7013863456101023, 0.7032879969095076, 0.7051948041086352,
+ 0.7071067811865475, 0.7090239421602076, 0.7109463010845827, 0.7128738720527471,
+ 0.7148066691959849, 0.7167447066838943, 0.718687998724491, 0.7206365595643126,
+ 0.7225904034885232, 0.7245495448210174, 0.7265139979245261, 0.7284837772007218,
+ 0.7304588970903234, 0.7324393720732029, 0.7344252166684908, 0.7364164454346837,
+ 0.7384130729697496, 0.7404151139112358, 0.7424225829363761, 0.7444354947621984,
+ 0.7464538641456323, 0.7484777058836176, 0.7505070348132126, 0.7525418658117031,
+ 0.7545822137967112, 0.7566280937263048, 0.7586795205991071, 0.7607365094544071,
+ 0.762799075372269, 0.7648672334736434, 0.7669409989204777, 0.7690203869158282,
+ 0.7711054127039704, 0.7731960915705107, 0.7752924388424999, 0.7773944698885442,
+ 0.7795022001189185, 0.7816156449856788, 0.7837348199827764, 0.7858597406461707,
+ 0.7879904225539431, 0.7901268813264122, 0.7922691326262467, 0.7944171921585818,
+ 0.7965710756711334, 0.7987307989543135, 0.8008963778413465, 0.8030678282083853,
+ 0.805245165974627, 0.8074284071024302, 0.8096175675974316, 0.8118126635086642,
+ 0.8140137109286738, 0.8162207259936375, 0.8184337248834821, 0.820652723822003,
+ 0.8228777390769823, 0.8251087869603088, 0.8273458838280969, 0.8295890460808079,
+ 0.8318382901633681, 0.8340936325652911, 0.8363550898207981, 0.8386226785089391,
+ 0.8408964152537144, 0.8431763167241966, 0.8454623996346523, 0.8477546807446661,
+ 0.8500531768592616, 0.8523579048290255, 0.8546688815502312, 0.8569861239649629,
+ 0.8593096490612387, 0.8616394738731368, 0.8639756154809185, 0.8663180910111553,
+ 0.8686669176368529, 0.871022112577578, 0.8733836930995842, 0.8757516765159389,
+ 0.8781260801866495, 0.8805069215187917, 0.8828942179666361, 0.8852879870317771,
+ 0.8876882462632604, 0.890095013257712, 0.8925083056594671, 0.8949281411607002,
+ 0.8973545375015533, 0.8997875124702672, 0.9022270839033115, 0.9046732696855155,
+ 0.9071260877501991, 0.909585556079304, 0.9120516927035263, 0.9145245157024483,
+ 0.9170040432046711, 0.9194902933879467, 0.9219832844793128, 0.9244830347552253,
+ 0.9269895625416926, 0.92950288621441, 0.9320230241988943, 0.9345499949706191,
+ 0.9370838170551498, 0.93962450902828, 0.9421720895161669, 0.9447265771954693,
+ 0.9472879907934827, 0.9498563490882775, 0.9524316709088368, 0.9550139751351947,
+ 0.9576032806985735, 0.9601996065815236, 0.9628029718180622, 0.9654133954938133,
+ 0.9680308967461471, 0.9706554947643201, 0.9732872087896164, 0.9759260581154889,
+ 0.9785720620876999, 0.9812252401044634, 0.9838856116165875, 0.9865531961276168,
+ 0.9892280131939752, 0.9919100824251095, 0.9945994234836328, 0.9972960560854698,
+ },
+}
+
+// The nativeHistogramBounds above can be generated with the code below.
+//
+// TODO(beorn7): It's tempting to actually use `go generate` to generate the
+// code above. However, this could lead to slightly different numbers on
+// different architectures. We still need to decide whether we are fine with
+// that, or if we might prefer to specify precise numbers in the standard.
+//
+// var nativeHistogramBounds [][]float64 = make([][]float64, 9)
+//
+// func init() {
+// // Populate nativeHistogramBounds.
+// numBuckets := 1
+// for i := range nativeHistogramBounds {
+// bounds := []float64{0.5}
+// factor := math.Exp2(math.Exp2(float64(-i)))
+// for j := 0; j < numBuckets-1; j++ {
+// var bound float64
+// if (j+1)%2 == 0 {
+// // Use previously calculated value for increased precision.
+// bound = nativeHistogramBounds[i-1][j/2+1]
+// } else {
+// bound = bounds[j] * factor
+// }
+// bounds = append(bounds, bound)
+// }
+// numBuckets *= 2
+// nativeHistogramBounds[i] = bounds
+// }
+// }
+
// A Histogram counts individual observations from an event or sample stream in
-// configurable buckets. Similar to a summary, it also provides a sum of
-// observations and an observation count.
+// configurable static buckets (or in dynamic sparse buckets as part of the
+// experimental Native Histograms, see below for more details). Similar to a
+// Summary, it also provides a sum of observations and an observation count.
//
// On the Prometheus server, quantiles can be calculated from a Histogram using
-// the histogram_quantile function in the query language.
+// the histogram_quantile PromQL function.
//
-// Note that Histograms, in contrast to Summaries, can be aggregated with the
-// Prometheus query language (see the documentation for detailed
-// procedures). However, Histograms require the user to pre-define suitable
-// buckets, and they are in general less accurate. The Observe method of a
-// Histogram has a very low performance overhead in comparison with the Observe
-// method of a Summary.
+// Note that Histograms, in contrast to Summaries, can be aggregated in PromQL
+// (see the documentation for detailed procedures). However, Histograms require
+// the user to pre-define suitable buckets, and they are in general less
+// accurate. (Both problems are addressed by the experimental Native
+// Histograms. To use them, configure a NativeHistogramBucketFactor in the
+// HistogramOpts. They also require a Prometheus server v2.40+ with the
+// corresponding feature flag enabled.)
+//
+// The Observe method of a Histogram has a very low performance overhead in
+// comparison with the Observe method of a Summary.
//
// To create Histogram instances, use NewHistogram.
type Histogram interface {
@@ -50,7 +247,8 @@ type Histogram interface {
// Observe adds a single observation to the histogram. Observations are
// usually positive or zero. Negative observations are accepted but
// prevent current versions of Prometheus from properly detecting
- // counter resets in the sum of observations. See
+ // counter resets in the sum of observations. (The experimental Native
+ // Histograms handle negative observations properly.) See
// https://prometheus.io/docs/practices/histograms/#count-and-sum-of-observations
// for details.
Observe(float64)
@@ -64,18 +262,28 @@ const bucketLabel = "le"
// tailored to broadly measure the response time (in seconds) of a network
// service. Most likely, however, you will be required to define buckets
// customized to your use case.
-var (
- DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
+var DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
- errBucketLabelNotAllowed = fmt.Errorf(
- "%q is not allowed as label name in histograms", bucketLabel,
- )
+// DefNativeHistogramZeroThreshold is the default value for
+// NativeHistogramZeroThreshold in the HistogramOpts.
+//
+// The value is 2^-128 (or 0.5*2^-127 in the actual IEEE 754 representation),
+// which is a bucket boundary at all possible resolutions.
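+//
+// As a quick sanity check (illustrative, not part of the API), the value can
+// be reproduced with:
+//
+//	fmt.Println(math.Exp2(-128)) // prints 2.938735877055719e-39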
+const DefNativeHistogramZeroThreshold = 2.938735877055719e-39
+
+// NativeHistogramZeroThresholdZero can be used as NativeHistogramZeroThreshold
+// in the HistogramOpts to create a zero bucket of width zero, i.e. a zero
+// bucket that only receives observations of precisely zero.
+const NativeHistogramZeroThresholdZero = -1
+
+var errBucketLabelNotAllowed = fmt.Errorf(
+ "%q is not allowed as label name in histograms", bucketLabel,
)
-// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest
-// bucket has an upper bound of 'start'. The final +Inf bucket is not counted
-// and not included in the returned slice. The returned slice is meant to be
-// used for the Buckets field of HistogramOpts.
+// LinearBuckets creates 'count' regular buckets, each 'width' wide, where the
+// lowest bucket has an upper bound of 'start'. The final +Inf bucket is not
+// counted and not included in the returned slice. The returned slice is meant
+// to be used for the Buckets field of HistogramOpts.
//
// The function panics if 'count' is zero or negative.
func LinearBuckets(start, width float64, count int) []float64 {
@@ -90,11 +298,11 @@ func LinearBuckets(start, width float64, count int) []float64 {
return buckets
}
-// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an
-// upper bound of 'start' and each following bucket's upper bound is 'factor'
-// times the previous bucket's upper bound. The final +Inf bucket is not counted
-// and not included in the returned slice. The returned slice is meant to be
-// used for the Buckets field of HistogramOpts.
+// ExponentialBuckets creates 'count' regular buckets, where the lowest bucket
+// has an upper bound of 'start' and each following bucket's upper bound is
+// 'factor' times the previous bucket's upper bound. The final +Inf bucket is
+// not counted and not included in the returned slice. The returned slice is
+// meant to be used for the Buckets field of HistogramOpts.
//
// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative,
// or if 'factor' is less than or equal 1.
@@ -116,6 +324,34 @@ func ExponentialBuckets(start, factor float64, count int) []float64 {
return buckets
}
+// ExponentialBucketsRange creates 'count' buckets, where the lowest bucket is
+// 'min' and the highest bucket is 'max'. The final +Inf bucket is not counted
+// and not included in the returned slice. The returned slice is meant to be
+// used for the Buckets field of HistogramOpts.
+//
+// The function panics if 'count' is 0 or negative, or if 'min' is 0 or negative.
+func ExponentialBucketsRange(min, max float64, count int) []float64 {
+ if count < 1 {
+ panic("ExponentialBucketsRange count needs a positive count")
+ }
+ if min <= 0 {
+ panic("ExponentialBucketsRange min needs to be greater than 0")
+ }
+
+ // Formula for exponential buckets:
+ // max = min*growthFactor^(count-1)
+
+ // We know min, max, and count. Solve for growthFactor.
+ growthFactor := math.Pow(max/min, 1.0/float64(count-1))
+
+ // Now that we know growthFactor, solve for each bucket.
+ buckets := make([]float64, count)
+ for i := 1; i <= count; i++ {
+ buckets[i-1] = min * math.Pow(growthFactor, float64(i-1))
+ }
+ return buckets
+}
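+
+// For example (illustrative): ExponentialBucketsRange(1, 100, 3) computes a
+// growth factor of (100/1)^(1/(3-1)) = 10 and returns [1, 10, 100].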
+
// HistogramOpts bundles the options for creating a Histogram metric. It is
// mandatory to set Name to a non-empty string. All other fields are optional
// and can safely be left at their zero value, although it is strongly
@@ -152,8 +388,124 @@ type HistogramOpts struct {
// element in the slice is the upper inclusive bound of a bucket. The
// values must be sorted in strictly increasing order. There is no need
// to add a highest bucket with +Inf bound, it will be added
- // implicitly. The default value is DefBuckets.
+ // implicitly. If Buckets is left as nil or set to a slice of length
+ // zero, it is replaced by default buckets. The default buckets are
+ // DefBuckets if no buckets for a native histogram (see below) are used,
+ // otherwise the default is no buckets. (In other words, if you want to
+ // use both regular buckets and buckets for a native histogram, you have
+ // to define the regular buckets here explicitly.)
Buckets []float64
+
+ // If NativeHistogramBucketFactor is greater than one, so-called sparse
+ // buckets are used (in addition to the regular buckets, if defined
+ // above). A Histogram with sparse buckets will be ingested as a Native
+ // Histogram by a Prometheus server with that feature enabled (requires
+ // Prometheus v2.40+). Sparse buckets are exponential buckets covering
+ // the whole float64 range (with the exception of the “zero” bucket, see
+ // NativeHistogramZeroThreshold below). From any one bucket to the next,
+ // the width of the bucket grows by a constant
+ // factor. NativeHistogramBucketFactor provides an upper bound for this
+ // factor (exception see below). The smaller
+ // NativeHistogramBucketFactor, the more buckets will be used and thus
+ // the more costly the histogram will become. A generally good trade-off
+ // between cost and accuracy is a value of 1.1 (each bucket is at most
+ // 10% wider than the previous one), which will result in each power of
+ // two divided into 8 buckets (e.g. there will be 8 buckets between 1
+ // and 2, same as between 2 and 4, and 4 and 8, etc.).
+ //
+ // Details about the actually used factor: The factor is calculated as
+ // 2^(2^-n), where n is an integer number between (and including) -4 and
+ // 8. n is chosen so that the resulting factor is the largest that is
+ // still smaller or equal to NativeHistogramBucketFactor. Note that the
+ // smallest possible factor is therefore approx. 1.00271 (i.e. 2^(2^-8)
+ // ). If NativeHistogramBucketFactor is greater than 1 but smaller than
+ // 2^(2^-8), then the actually used factor is still 2^(2^-8) even though
+ // it is larger than the provided NativeHistogramBucketFactor.
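+ //
+ // For example (illustrative): NativeHistogramBucketFactor 1.1 selects
+ // n=3, i.e. an actual factor of 2^(2^-3) ≈ 1.0905, dividing each power
+ // of two into 8 buckets.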
+ //
+ // NOTE: Native Histograms are still an experimental feature. Their
+ // behavior might still change without a major version
+ // bump. Subsequently, all NativeHistogram... options here might still
+ // change their behavior or name (or might completely disappear) without
+ // a major version bump.
+ NativeHistogramBucketFactor float64
+ // All observations with an absolute value less than or equal to
+ // NativeHistogramZeroThreshold are accumulated into a “zero” bucket.
+ // For best results, this should be close to a bucket boundary. This is
+ // usually the case if picking a power of two. If
+ // NativeHistogramZeroThreshold is left at zero,
+ // DefNativeHistogramZeroThreshold is used as the threshold. To
+ // configure a zero bucket with an actual threshold of zero (i.e. only
+ // observations of precisely zero will go into the zero bucket), set
+ // NativeHistogramZeroThreshold to the NativeHistogramZeroThresholdZero
+ // constant (or any negative float value).
+ NativeHistogramZeroThreshold float64
+
+ // The next three fields define a strategy to limit the number of
+ // populated sparse buckets. If NativeHistogramMaxBucketNumber is left
+ // at zero, the number of buckets is not limited. (Note that this might
+ // lead to unbounded memory consumption if the values observed by the
+ // Histogram are sufficiently wide-spread. In particular, this could be
+ // used as a DoS attack vector. Where the observed values depend on
+ // external inputs, it is highly recommended to set a
+ // NativeHistogramMaxBucketNumber.) Once the set
+ // NativeHistogramMaxBucketNumber is exceeded, the following strategy is
+ // enacted:
+ // - First, if the last reset (or the creation) of the histogram is at
+ // least NativeHistogramMinResetDuration ago, then the whole
+ // histogram is reset to its initial state (including regular
+ // buckets).
+ // - If less time has passed, or if NativeHistogramMinResetDuration is
+ // zero, no reset is performed. Instead, the zero threshold is
+ // increased sufficiently to reduce the number of buckets to or below
+ // NativeHistogramMaxBucketNumber, but not to more than
+ // NativeHistogramMaxZeroThreshold. Thus, if
+ // NativeHistogramMaxZeroThreshold is already at or below the current
+ // zero threshold, nothing happens at this step.
+ // - After that, if the number of buckets still exceeds
+ // NativeHistogramMaxBucketNumber, the resolution of the histogram is
+ // reduced by doubling the width of the sparse buckets (up to a
+ // growth factor between one bucket to the next of 2^(2^4) = 65536,
+ // see above).
+ // - Any increased zero threshold or reduced resolution is reset back
+ // to their original values once NativeHistogramMinResetDuration has
+ // passed (since the last reset or the creation of the histogram).
+ NativeHistogramMaxBucketNumber uint32
+ NativeHistogramMinResetDuration time.Duration
+ NativeHistogramMaxZeroThreshold float64
+
+ // NativeHistogramMaxExemplars limits the number of exemplars
+ // that are kept in memory for each native histogram. If you leave it at
+ // zero, a default value of 10 is used. If no exemplars should be kept specifically
+ // for native histograms, set it to a negative value. (Scrapers can
+ // still use the exemplars exposed for classic buckets, which are managed
+ // independently.)
+ NativeHistogramMaxExemplars int
+ // NativeHistogramExemplarTTL is only checked once
+ // NativeHistogramMaxExemplars is exceeded. In that case, the
+ // oldest exemplar is removed if it is older than NativeHistogramExemplarTTL.
+ // Otherwise, the older exemplar in the pair of exemplars that are closest
+ // together (on an exponential scale) is removed.
+ // If NativeHistogramExemplarTTL is left at its zero value, a default value of
+ // 5m is used. To always delete the oldest exemplar, set it to a negative value.
+ NativeHistogramExemplarTTL time.Duration
+
+ // now is for testing purposes, by default it's time.Now.
+ now func() time.Time
+
+ // afterFunc is for testing purposes, by default it's time.AfterFunc.
+ afterFunc func(time.Duration, func()) *time.Timer
+}
+
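+// A hypothetical configuration sketch (field values are illustrative, not
+// recommendations) showing how the native histogram options above might be
+// combined:
+//
+//	h := NewHistogram(HistogramOpts{
+//		Name:                            "request_duration_seconds",
+//		Help:                            "Request latency distribution.",
+//		NativeHistogramBucketFactor:     1.1,
+//		NativeHistogramMaxBucketNumber:  100,
+//		NativeHistogramMinResetDuration: time.Hour,
+//	})
+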
+// HistogramVecOpts bundles the options to create a HistogramVec metric.
+// It is mandatory to set HistogramOpts, see there for mandatory fields. VariableLabels
+// is optional and can safely be left to its default value.
+type HistogramVecOpts struct {
+ HistogramOpts
+
+ // VariableLabels are used to partition the metric vector by the given set
+ // of labels. Each label value will be constrained with the optional Constraint
+ // function, if provided.
+ VariableLabels ConstrainableLabels
}
// NewHistogram creates a new Histogram based on the provided HistogramOpts. It
@@ -175,11 +527,11 @@ func NewHistogram(opts HistogramOpts) Histogram {
}
func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram {
- if len(desc.variableLabels) != len(labelValues) {
- panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues))
+ if len(desc.variableLabels.names) != len(labelValues) {
+ panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, labelValues))
}
- for _, n := range desc.variableLabels {
+ for _, n := range desc.variableLabels.names {
if n == bucketLabel {
panic(errBucketLabelNotAllowed)
}
@@ -190,16 +542,38 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
}
}
- if len(opts.Buckets) == 0 {
- opts.Buckets = DefBuckets
+ if opts.now == nil {
+ opts.now = time.Now
+ }
+ if opts.afterFunc == nil {
+ opts.afterFunc = time.AfterFunc
}
h := &histogram{
- desc: desc,
- upperBounds: opts.Buckets,
- labelPairs: MakeLabelPairs(desc, labelValues),
- counts: [2]*histogramCounts{{}, {}},
- now: time.Now,
+ desc: desc,
+ upperBounds: opts.Buckets,
+ labelPairs: MakeLabelPairs(desc, labelValues),
+ nativeHistogramMaxBuckets: opts.NativeHistogramMaxBucketNumber,
+ nativeHistogramMaxZeroThreshold: opts.NativeHistogramMaxZeroThreshold,
+ nativeHistogramMinResetDuration: opts.NativeHistogramMinResetDuration,
+ lastResetTime: opts.now(),
+ now: opts.now,
+ afterFunc: opts.afterFunc,
+ }
+ if len(h.upperBounds) == 0 && opts.NativeHistogramBucketFactor <= 1 {
+ h.upperBounds = DefBuckets
+ }
+ if opts.NativeHistogramBucketFactor <= 1 {
+ h.nativeHistogramSchema = math.MinInt32 // To mark that there are no sparse buckets.
+ } else {
+ switch {
+ case opts.NativeHistogramZeroThreshold > 0:
+ h.nativeHistogramZeroThreshold = opts.NativeHistogramZeroThreshold
+ case opts.NativeHistogramZeroThreshold == 0:
+ h.nativeHistogramZeroThreshold = DefNativeHistogramZeroThreshold
+ } // Leave h.nativeHistogramZeroThreshold at 0 otherwise.
+ h.nativeHistogramSchema = pickSchema(opts.NativeHistogramBucketFactor)
+ h.nativeExemplars = makeNativeExemplars(opts.NativeHistogramExemplarTTL, opts.NativeHistogramMaxExemplars)
}
for i, upperBound := range h.upperBounds {
if i < len(h.upperBounds)-1 {
@@ -218,8 +592,12 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
}
// Finally we know the final length of h.upperBounds and can make buckets
// for both counts as well as exemplars:
- h.counts[0].buckets = make([]uint64, len(h.upperBounds))
- h.counts[1].buckets = make([]uint64, len(h.upperBounds))
+ h.counts[0] = &histogramCounts{buckets: make([]uint64, len(h.upperBounds))}
+ atomic.StoreUint64(&h.counts[0].nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold))
+ atomic.StoreInt32(&h.counts[0].nativeHistogramSchema, h.nativeHistogramSchema)
+ h.counts[1] = &histogramCounts{buckets: make([]uint64, len(h.upperBounds))}
+ atomic.StoreUint64(&h.counts[1].nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold))
+ atomic.StoreInt32(&h.counts[1].nativeHistogramSchema, h.nativeHistogramSchema)
h.exemplars = make([]atomic.Value, len(h.upperBounds)+1)
h.init(h) // Init self-collection.
@@ -227,13 +605,98 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
}
type histogramCounts struct {
+ // Order in this struct matters for the alignment required by atomic
+ // operations, see http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+
// sumBits contains the bits of the float64 representing the sum of all
- // observations. sumBits and count have to go first in the struct to
- // guarantee alignment for atomic operations.
- // http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+ // observations.
sumBits uint64
count uint64
+
+ // nativeHistogramZeroBucket counts all (positive and negative)
+ // observations in the zero bucket (with an absolute value less than or
+ // equal to the current threshold, see next field).
+ nativeHistogramZeroBucket uint64
+ // nativeHistogramZeroThresholdBits is the bit pattern of the current
+ // threshold for the zero bucket. It's initially equal to
+ // nativeHistogramZeroThreshold but may change according to the bucket
+ // count limitation strategy.
+ nativeHistogramZeroThresholdBits uint64
+ // nativeHistogramSchema may change over time according to the bucket
+ // count limitation strategy and therefore has to be saved here.
+ nativeHistogramSchema int32
+ // Number of (positive and negative) sparse buckets.
+ nativeHistogramBucketsNumber uint32
+
+ // Regular buckets.
buckets []uint64
+
+ // The sparse buckets for native histograms are implemented with a
+ // sync.Map for now. A dedicated data structure will likely be more
+ // efficient. There are separate maps for negative and positive
+ // observations. The map's value is an *int64, counting observations in
+ // that bucket. (Note that we don't use uint64 as an int64 won't
+ // overflow in practice, and working with signed numbers from the
+ // beginning simplifies the handling of deltas.) The map's key is the
+ // index of the bucket according to the used
+ // nativeHistogramSchema. Index 0 is for an upper bound of 1.
+ nativeHistogramBucketsPositive, nativeHistogramBucketsNegative sync.Map
+}
+
+// observe manages the parts of observe that only affect
+// histogramCounts. doSparse is true if the sparse buckets should be
+// updated, too.
+func (hc *histogramCounts) observe(v float64, bucket int, doSparse bool) {
+ if bucket < len(hc.buckets) {
+ atomic.AddUint64(&hc.buckets[bucket], 1)
+ }
+ atomicAddFloat(&hc.sumBits, v)
+ if doSparse && !math.IsNaN(v) {
+ var (
+ key int
+ schema = atomic.LoadInt32(&hc.nativeHistogramSchema)
+ zeroThreshold = math.Float64frombits(atomic.LoadUint64(&hc.nativeHistogramZeroThresholdBits))
+ bucketCreated, isInf bool
+ )
+ if math.IsInf(v, 0) {
+ // Pretend v is MaxFloat64 but later increment key by one.
+ if math.IsInf(v, +1) {
+ v = math.MaxFloat64
+ } else {
+ v = -math.MaxFloat64
+ }
+ isInf = true
+ }
+ frac, exp := math.Frexp(math.Abs(v))
+ if schema > 0 {
+ bounds := nativeHistogramBounds[schema]
+ key = sort.SearchFloat64s(bounds, frac) + (exp-1)*len(bounds)
+ } else {
+ key = exp
+ if frac == 0.5 {
+ key--
+ }
+ offset := (1 << -schema) - 1
+ key = (key + offset) >> -schema
+ }
+ if isInf {
+ key++
+ }
+ switch {
+ case v > zeroThreshold:
+ bucketCreated = addToBucket(&hc.nativeHistogramBucketsPositive, key, 1)
+ case v < -zeroThreshold:
+ bucketCreated = addToBucket(&hc.nativeHistogramBucketsNegative, key, 1)
+ default:
+ atomic.AddUint64(&hc.nativeHistogramZeroBucket, 1)
+ }
+ if bucketCreated {
+ atomic.AddUint32(&hc.nativeHistogramBucketsNumber, 1)
+ }
+ }
+ // Increment count last as we take it as a signal that the observation
+ // is complete.
+ atomic.AddUint64(&hc.count, 1)
}
type histogram struct {
@@ -248,7 +711,7 @@ type histogram struct {
// perspective of the histogram) swap the hot–cold under the writeMtx
// lock. A cooldown is awaited (while locked) by comparing the number of
// observations with the initiation count. Once they match, then the
- // last observation on the now cool one has completed. All cool fields must
+ // last observation on the now cool one has completed. All cold fields must
// be merged into the new hot before releasing writeMtx.
//
// Fields with atomic access first! See alignment constraint:
@@ -256,8 +719,10 @@ type histogram struct {
countAndHotIdx uint64
selfCollector
- desc *Desc
- writeMtx sync.Mutex // Only used in the Write method.
+ desc *Desc
+
+ // Only used in the Write method and for sparse bucket management.
+ mtx sync.Mutex
// Two counts, one is "hot" for lock-free observations, the other is
// "cold" for writing out a dto.Metric. It has to be an array of
@@ -265,11 +730,27 @@ type histogram struct {
// http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
counts [2]*histogramCounts
- upperBounds []float64
- labelPairs []*dto.LabelPair
- exemplars []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar.
+ upperBounds []float64
+ labelPairs []*dto.LabelPair
+ exemplars []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar.
+ nativeHistogramSchema int32 // The initial schema. Set to math.MinInt32 if no sparse buckets are used.
+ nativeHistogramZeroThreshold float64 // The initial zero threshold.
+ nativeHistogramMaxZeroThreshold float64
+ nativeHistogramMaxBuckets uint32
+ nativeHistogramMinResetDuration time.Duration
+ // lastResetTime is protected by mtx. It is also used as created timestamp.
+ lastResetTime time.Time
+ // resetScheduled is protected by mtx. It is true if a reset is
+ // scheduled for a later time (when nativeHistogramMinResetDuration has
+ // passed).
+ resetScheduled bool
+ nativeExemplars nativeExemplars
+
+ // now is for testing purposes, by default it's time.Now.
+ now func() time.Time
- now func() time.Time // To mock out time.Now() for testing.
+ // afterFunc is for testing purposes, by default it's time.AfterFunc.
+ afterFunc func(time.Duration, func()) *time.Timer
}
func (h *histogram) Desc() *Desc {
@@ -280,6 +761,9 @@ func (h *histogram) Observe(v float64) {
h.observe(v, h.findBucket(v))
}
+// ObserveWithExemplar should not be called in a high-frequency setting
+// for a native histogram with configured exemplars. For this case,
+// the implementation isn't lock-free and might suffer from lock contention.
func (h *histogram) ObserveWithExemplar(v float64, e Labels) {
i := h.findBucket(v)
h.observe(v, i)
@@ -291,8 +775,8 @@ func (h *histogram) Write(out *dto.Metric) error {
// the hot path, i.e. Observe is called much more often than Write. The
// complication of making Write lock-free isn't worth it, if possible at
// all.
- h.writeMtx.Lock()
- defer h.writeMtx.Unlock()
+ h.mtx.Lock()
+ defer h.mtx.Unlock()
// Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0)
// without touching the count bits. See the struct comments for a full
@@ -305,16 +789,17 @@ func (h *histogram) Write(out *dto.Metric) error {
hotCounts := h.counts[n>>63]
coldCounts := h.counts[(^n)>>63]
- // Await cooldown.
- for count != atomic.LoadUint64(&coldCounts.count) {
- runtime.Gosched() // Let observations get work done.
- }
+ waitForCooldown(count, coldCounts)
his := &dto.Histogram{
- Bucket: make([]*dto.Bucket, len(h.upperBounds)),
- SampleCount: proto.Uint64(count),
- SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
+ Bucket: make([]*dto.Bucket, len(h.upperBounds)),
+ SampleCount: proto.Uint64(count),
+ SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
+ CreatedTimestamp: timestamppb.New(h.lastResetTime),
}
+ out.Histogram = his
+ out.Label = h.labelPairs
+
var cumCount uint64
for i, upperBound := range h.upperBounds {
cumCount += atomic.LoadUint64(&coldCounts.buckets[i])
@@ -335,25 +820,38 @@ func (h *histogram) Write(out *dto.Metric) error {
}
his.Bucket = append(his.Bucket, b)
}
+ if h.nativeHistogramSchema > math.MinInt32 {
+ his.ZeroThreshold = proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.nativeHistogramZeroThresholdBits)))
+ his.Schema = proto.Int32(atomic.LoadInt32(&coldCounts.nativeHistogramSchema))
+ zeroBucket := atomic.LoadUint64(&coldCounts.nativeHistogramZeroBucket)
- out.Histogram = his
- out.Label = h.labelPairs
+ defer func() {
+ coldCounts.nativeHistogramBucketsPositive.Range(addAndReset(&hotCounts.nativeHistogramBucketsPositive, &hotCounts.nativeHistogramBucketsNumber))
+ coldCounts.nativeHistogramBucketsNegative.Range(addAndReset(&hotCounts.nativeHistogramBucketsNegative, &hotCounts.nativeHistogramBucketsNumber))
+ }()
- // Finally add all the cold counts to the new hot counts and reset the cold counts.
- atomic.AddUint64(&hotCounts.count, count)
- atomic.StoreUint64(&coldCounts.count, 0)
- for {
- oldBits := atomic.LoadUint64(&hotCounts.sumBits)
- newBits := math.Float64bits(math.Float64frombits(oldBits) + his.GetSampleSum())
- if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
- atomic.StoreUint64(&coldCounts.sumBits, 0)
- break
+ his.ZeroCount = proto.Uint64(zeroBucket)
+ his.NegativeSpan, his.NegativeDelta = makeBuckets(&coldCounts.nativeHistogramBucketsNegative)
+ his.PositiveSpan, his.PositiveDelta = makeBuckets(&coldCounts.nativeHistogramBucketsPositive)
+
+ // Add a no-op span to a histogram without observations and with
+ // a zero threshold of zero. Otherwise, a native histogram would
+ // look like a classic histogram to scrapers.
+ if *his.ZeroThreshold == 0 && *his.ZeroCount == 0 && len(his.PositiveSpan) == 0 && len(his.NegativeSpan) == 0 {
+ his.PositiveSpan = []*dto.BucketSpan{{
+ Offset: proto.Int32(0),
+ Length: proto.Uint32(0),
+ }}
}
+
+ if h.nativeExemplars.isEnabled() {
+ h.nativeExemplars.Lock()
+ his.Exemplars = append(his.Exemplars, h.nativeExemplars.exemplars...)
+ h.nativeExemplars.Unlock()
+ }
+
}
- for i := range h.upperBounds {
- atomic.AddUint64(&hotCounts.buckets[i], atomic.LoadUint64(&coldCounts.buckets[i]))
- atomic.StoreUint64(&coldCounts.buckets[i], 0)
- }
+ addAndResetCounts(hotCounts, coldCounts)
return nil
}
@@ -374,29 +872,258 @@ func (h *histogram) findBucket(v float64) int {
// observe is the implementation for Observe without the findBucket part.
func (h *histogram) observe(v float64, bucket int) {
+ // Do not add to sparse buckets for NaN observations.
+ doSparse := h.nativeHistogramSchema > math.MinInt32 && !math.IsNaN(v)
// We increment h.countAndHotIdx so that the counter in the lower
// 63 bits gets incremented. At the same time, we get the new value
// back, which we can use to find the currently-hot counts.
n := atomic.AddUint64(&h.countAndHotIdx, 1)
hotCounts := h.counts[n>>63]
+ hotCounts.observe(v, bucket, doSparse)
+ if doSparse {
+ h.limitBuckets(hotCounts, v, bucket)
+ }
+}
- if bucket < len(h.upperBounds) {
- atomic.AddUint64(&hotCounts.buckets[bucket], 1)
+// limitBuckets applies a strategy to limit the number of populated sparse
+// buckets. It's generally best effort, and there are situations where the
+// number can go higher (if even the lowest resolution isn't enough to reduce
+// the number sufficiently, or if the provided counts aren't fully updated yet
+// by a concurrently happening Write call).
+func (h *histogram) limitBuckets(counts *histogramCounts, value float64, bucket int) {
+ if h.nativeHistogramMaxBuckets == 0 {
+ return // No limit configured.
}
- for {
- oldBits := atomic.LoadUint64(&hotCounts.sumBits)
- newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
- if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
- break
+ if h.nativeHistogramMaxBuckets >= atomic.LoadUint32(&counts.nativeHistogramBucketsNumber) {
+ return // Bucket limit not exceeded yet.
+ }
+
+ h.mtx.Lock()
+ defer h.mtx.Unlock()
+
+ // The hot counts might have been swapped just before we acquired the
+ // lock. Re-fetch the hot counts first...
+ n := atomic.LoadUint64(&h.countAndHotIdx)
+ hotIdx := n >> 63
+ coldIdx := (^n) >> 63
+ hotCounts := h.counts[hotIdx]
+ coldCounts := h.counts[coldIdx]
+ // ...and then check again if we really have to reduce the bucket count.
+ if h.nativeHistogramMaxBuckets >= atomic.LoadUint32(&hotCounts.nativeHistogramBucketsNumber) {
+ return // Bucket limit not exceeded after all.
+ }
+ // Try the various strategies in order.
+ if h.maybeReset(hotCounts, coldCounts, coldIdx, value, bucket) {
+ return
+ }
+ // One of the other strategies will happen. To undo what they will do as
+ // soon as enough time has passed to satisfy
+ // h.nativeHistogramMinResetDuration, schedule a reset at the right time
+ // if we haven't done so already.
+ if h.nativeHistogramMinResetDuration > 0 && !h.resetScheduled {
+ h.resetScheduled = true
+ h.afterFunc(h.nativeHistogramMinResetDuration-h.now().Sub(h.lastResetTime), h.reset)
+ }
+
+ if h.maybeWidenZeroBucket(hotCounts, coldCounts) {
+ return
+ }
+ h.doubleBucketWidth(hotCounts, coldCounts)
+}
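+
+// A sketch of the countAndHotIdx encoding relied on above (existing
+// behavior, shown for orientation): the top bit selects the hot counts
+// index and the lower 63 bits hold the observation count:
+//
+//	n := atomic.LoadUint64(&h.countAndHotIdx)
+//	hotIdx := n >> 63            // top bit picks the hot side
+//	coldIdx := (^n) >> 63        // complement picks the cold side
+//	count := n & ((1 << 63) - 1) // lower 63 bits are the count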
+
+// maybeReset resets the whole histogram if at least
+// h.nativeHistogramMinResetDuration has passed. It returns true if the
+// histogram has been reset. The caller must have locked h.mtx.
+func (h *histogram) maybeReset(
+ hot, cold *histogramCounts, coldIdx uint64, value float64, bucket int,
+) bool {
+ // We are using the possibly mocked h.now() rather than
+ // time.Since(h.lastResetTime) to enable testing.
+ if h.nativeHistogramMinResetDuration == 0 || // No reset configured.
+ h.resetScheduled || // Do not interfere if a reset is already scheduled.
+ h.now().Sub(h.lastResetTime) < h.nativeHistogramMinResetDuration {
+ return false
+ }
+ // Completely reset coldCounts.
+ h.resetCounts(cold)
+ // Repeat the latest observation to not lose it completely.
+ cold.observe(value, bucket, true)
+ // Make coldCounts the new hot counts while resetting countAndHotIdx.
+ n := atomic.SwapUint64(&h.countAndHotIdx, (coldIdx<<63)+1)
+ count := n & ((1 << 63) - 1)
+ waitForCooldown(count, hot)
+ // Finally, reset the formerly hot counts, too.
+ h.resetCounts(hot)
+ h.lastResetTime = h.now()
+ return true
+}
+
+// reset resets the whole histogram. It locks h.mtx itself, i.e. it has to be
+// called without having locked h.mtx.
+func (h *histogram) reset() {
+ h.mtx.Lock()
+ defer h.mtx.Unlock()
+
+ n := atomic.LoadUint64(&h.countAndHotIdx)
+ hotIdx := n >> 63
+ coldIdx := (^n) >> 63
+ hot := h.counts[hotIdx]
+ cold := h.counts[coldIdx]
+ // Completely reset coldCounts.
+ h.resetCounts(cold)
+ // Make coldCounts the new hot counts while resetting countAndHotIdx.
+ n = atomic.SwapUint64(&h.countAndHotIdx, coldIdx<<63)
+ count := n & ((1 << 63) - 1)
+ waitForCooldown(count, hot)
+ // Finally, reset the formerly hot counts, too.
+ h.resetCounts(hot)
+ h.lastResetTime = h.now()
+ h.resetScheduled = false
+}
+
+// maybeWidenZeroBucket widens the zero bucket until it includes the existing
+// buckets closest to the zero bucket (which could be two, if an equidistant
+// negative and a positive bucket exists, but usually it's only one bucket to be
+// merged into the new wider zero bucket). h.nativeHistogramMaxZeroThreshold
+// limits how far the zero bucket can be extended, and if that's not enough to
+// include an existing bucket, the method returns false. The caller must have
+// locked h.mtx.
+func (h *histogram) maybeWidenZeroBucket(hot, cold *histogramCounts) bool {
+ currentZeroThreshold := math.Float64frombits(atomic.LoadUint64(&hot.nativeHistogramZeroThresholdBits))
+ if currentZeroThreshold >= h.nativeHistogramMaxZeroThreshold {
+ return false
+ }
+ // Find the key of the bucket closest to zero.
+ smallestKey := findSmallestKey(&hot.nativeHistogramBucketsPositive)
+ smallestNegativeKey := findSmallestKey(&hot.nativeHistogramBucketsNegative)
+ if smallestNegativeKey < smallestKey {
+ smallestKey = smallestNegativeKey
+ }
+ if smallestKey == math.MaxInt32 {
+ return false
+ }
+ newZeroThreshold := getLe(smallestKey, atomic.LoadInt32(&hot.nativeHistogramSchema))
+ if newZeroThreshold > h.nativeHistogramMaxZeroThreshold {
+ return false // New threshold would exceed the max threshold.
+ }
+ atomic.StoreUint64(&cold.nativeHistogramZeroThresholdBits, math.Float64bits(newZeroThreshold))
+ // Remove applicable buckets.
+ if _, loaded := cold.nativeHistogramBucketsNegative.LoadAndDelete(smallestKey); loaded {
+ atomicDecUint32(&cold.nativeHistogramBucketsNumber)
+ }
+ if _, loaded := cold.nativeHistogramBucketsPositive.LoadAndDelete(smallestKey); loaded {
+ atomicDecUint32(&cold.nativeHistogramBucketsNumber)
+ }
+ // Make cold counts the new hot counts.
+ n := atomic.AddUint64(&h.countAndHotIdx, 1<<63)
+ count := n & ((1 << 63) - 1)
+ // Swap the pointer names to represent the new roles and make
+ // the rest less confusing.
+ hot, cold = cold, hot
+ waitForCooldown(count, cold)
+ // Add all the now cold counts to the new hot counts...
+ addAndResetCounts(hot, cold)
+ // ...adjust the new zero threshold in the cold counts, too...
+ atomic.StoreUint64(&cold.nativeHistogramZeroThresholdBits, math.Float64bits(newZeroThreshold))
+ // ...and then merge the newly deleted buckets into the wider zero
+ // bucket.
+ mergeAndDeleteOrAddAndReset := func(hotBuckets, coldBuckets *sync.Map) func(k, v interface{}) bool {
+ return func(k, v interface{}) bool {
+ key := k.(int)
+ bucket := v.(*int64)
+ if key == smallestKey {
+ // Merge into hot zero bucket...
+ atomic.AddUint64(&hot.nativeHistogramZeroBucket, uint64(atomic.LoadInt64(bucket)))
+ // ...and delete from cold counts.
+ coldBuckets.Delete(key)
+ atomicDecUint32(&cold.nativeHistogramBucketsNumber)
+ } else {
+ // Add to corresponding hot bucket...
+ if addToBucket(hotBuckets, key, atomic.LoadInt64(bucket)) {
+ atomic.AddUint32(&hot.nativeHistogramBucketsNumber, 1)
+ }
+ // ...and reset cold bucket.
+ atomic.StoreInt64(bucket, 0)
+ }
+ return true
}
}
- // Increment count last as we take it as a signal that the observation
- // is complete.
- atomic.AddUint64(&hotCounts.count, 1)
+
+ cold.nativeHistogramBucketsPositive.Range(mergeAndDeleteOrAddAndReset(&hot.nativeHistogramBucketsPositive, &cold.nativeHistogramBucketsPositive))
+ cold.nativeHistogramBucketsNegative.Range(mergeAndDeleteOrAddAndReset(&hot.nativeHistogramBucketsNegative, &cold.nativeHistogramBucketsNegative))
+ return true
}
-// updateExemplar replaces the exemplar for the provided bucket. With empty
-// labels, it's a no-op. It panics if any of the labels is invalid.
+// doubleBucketWidth doubles the bucket width (by decrementing the schema
+// number). Note that with very sparse buckets, this may reduce the bucket
+// count only slightly (or not at all). The method does nothing if the
+// schema is already -4.
+func (h *histogram) doubleBucketWidth(hot, cold *histogramCounts) {
+ coldSchema := atomic.LoadInt32(&cold.nativeHistogramSchema)
+ if coldSchema == -4 {
+ return // Already at lowest resolution.
+ }
+ coldSchema--
+ atomic.StoreInt32(&cold.nativeHistogramSchema, coldSchema)
+ // Play it simple and just delete all cold buckets.
+ atomic.StoreUint32(&cold.nativeHistogramBucketsNumber, 0)
+ deleteSyncMap(&cold.nativeHistogramBucketsNegative)
+ deleteSyncMap(&cold.nativeHistogramBucketsPositive)
+ // Make coldCounts the new hot counts.
+ n := atomic.AddUint64(&h.countAndHotIdx, 1<<63)
+ count := n & ((1 << 63) - 1)
+ // Swap the pointer names to represent the new roles and make
+ // the rest less confusing.
+ hot, cold = cold, hot
+ waitForCooldown(count, cold)
+ // Add all the now cold counts to the new hot counts...
+ addAndResetCounts(hot, cold)
+ // ...adjust the schema in the cold counts, too...
+ atomic.StoreInt32(&cold.nativeHistogramSchema, coldSchema)
+ // ...and then merge the cold buckets into the wider hot buckets.
+ merge := func(hotBuckets *sync.Map) func(k, v interface{}) bool {
+ return func(k, v interface{}) bool {
+ key := k.(int)
+ bucket := v.(*int64)
+ // Adjust key to match the bucket to merge into.
+ if key > 0 {
+ key++
+ }
+ key /= 2
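+ // E.g., with the schema decremented by one, old keys 5 and 6
+ // both map to new key 3 (i.e. ceil(k/2)); for negative keys,
+ // Go's truncating division already rounds toward zero, which
+ // is ceil, so -5 maps to -2.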
+ // Add to corresponding hot bucket.
+ if addToBucket(hotBuckets, key, atomic.LoadInt64(bucket)) {
+ atomic.AddUint32(&hot.nativeHistogramBucketsNumber, 1)
+ }
+ return true
+ }
+ }
+
+ cold.nativeHistogramBucketsPositive.Range(merge(&hot.nativeHistogramBucketsPositive))
+ cold.nativeHistogramBucketsNegative.Range(merge(&hot.nativeHistogramBucketsNegative))
+ // Play it simple again and just delete all cold buckets.
+ atomic.StoreUint32(&cold.nativeHistogramBucketsNumber, 0)
+ deleteSyncMap(&cold.nativeHistogramBucketsNegative)
+ deleteSyncMap(&cold.nativeHistogramBucketsPositive)
+}
+
+func (h *histogram) resetCounts(counts *histogramCounts) {
+ atomic.StoreUint64(&counts.sumBits, 0)
+ atomic.StoreUint64(&counts.count, 0)
+ atomic.StoreUint64(&counts.nativeHistogramZeroBucket, 0)
+ atomic.StoreUint64(&counts.nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold))
+ atomic.StoreInt32(&counts.nativeHistogramSchema, h.nativeHistogramSchema)
+ atomic.StoreUint32(&counts.nativeHistogramBucketsNumber, 0)
+ for i := range h.upperBounds {
+ atomic.StoreUint64(&counts.buckets[i], 0)
+ }
+ deleteSyncMap(&counts.nativeHistogramBucketsNegative)
+ deleteSyncMap(&counts.nativeHistogramBucketsPositive)
+}
+
+// updateExemplar replaces the exemplar for the provided classic bucket.
+// With empty labels, it's a no-op. It panics if any of the labels is invalid.
+// If the histogram is native, the exemplar is also cached in nativeExemplars,
+// which has a limited capacity; once the limit is reached, an existing
+// exemplar is evicted to make room for the new one.
func (h *histogram) updateExemplar(v float64, bucket int, l Labels) {
if l == nil {
return
@@ -406,6 +1133,10 @@ func (h *histogram) updateExemplar(v float64, bucket int, l Labels) {
panic(err)
}
h.exemplars[bucket].Store(e)
+ doSparse := h.nativeHistogramSchema > math.MinInt32 && !math.IsNaN(v)
+ if doSparse {
+ h.nativeExemplars.addExemplar(e)
+ }
}
// HistogramVec is a Collector that bundles a set of Histograms that all share the
@@ -420,15 +1151,23 @@ type HistogramVec struct {
// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
// partitioned by the given label names.
func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
- desc := NewDesc(
+ return V2.NewHistogramVec(HistogramVecOpts{
+ HistogramOpts: opts,
+ VariableLabels: UnconstrainedLabels(labelNames),
+ })
+}
+
+// NewHistogramVec creates a new HistogramVec based on the provided HistogramVecOpts.
+func (v2) NewHistogramVec(opts HistogramVecOpts) *HistogramVec {
+ desc := V2.NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
- labelNames,
+ opts.VariableLabels,
opts.ConstLabels,
)
return &HistogramVec{
MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
- return newHistogram(desc, opts, lvs...)
+ return newHistogram(desc, opts.HistogramOpts, lvs...)
}),
}
}
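+
+// An illustrative use of HistogramVecOpts (the metric and label names below
+// are hypothetical, not part of the library):
+//
+//	reqDur := prometheus.V2.NewHistogramVec(prometheus.HistogramVecOpts{
+//		HistogramOpts: prometheus.HistogramOpts{
+//			Name:    "http_request_duration_seconds",
+//			Help:    "Duration of HTTP requests.",
+//			Buckets: prometheus.DefBuckets,
+//		},
+//		VariableLabels: prometheus.UnconstrainedLabels([]string{"code", "method"}),
+//	})
+//	reqDur.WithLabelValues("200", "GET").Observe(0.042)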
@@ -488,7 +1227,8 @@ func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) {
// WithLabelValues works as GetMetricWithLabelValues, but panics where
// GetMetricWithLabelValues would have returned an error. Not returning an
// error allows shortcuts like
-// myVec.WithLabelValues("404", "GET").Observe(42.21)
+//
+// myVec.WithLabelValues("404", "GET").Observe(42.21)
func (v *HistogramVec) WithLabelValues(lvs ...string) Observer {
h, err := v.GetMetricWithLabelValues(lvs...)
if err != nil {
@@ -499,7 +1239,8 @@ func (v *HistogramVec) WithLabelValues(lvs ...string) Observer {
// With works as GetMetricWith but panics where GetMetricWithLabels would have
// returned an error. Not returning an error allows shortcuts like
-// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
+//
+// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
func (v *HistogramVec) With(labels Labels) Observer {
h, err := v.GetMetricWith(labels)
if err != nil {
@@ -545,6 +1286,7 @@ type constHistogram struct {
sum float64
buckets map[float64]uint64
labelPairs []*dto.LabelPair
+ createdTs *timestamppb.Timestamp
}
func (h *constHistogram) Desc() *Desc {
@@ -552,12 +1294,14 @@ func (h *constHistogram) Desc() *Desc {
}
func (h *constHistogram) Write(out *dto.Metric) error {
- his := &dto.Histogram{}
+ his := &dto.Histogram{
+ CreatedTimestamp: h.createdTs,
+ }
+
buckets := make([]*dto.Bucket, 0, len(h.buckets))
his.SampleCount = proto.Uint64(h.count)
his.SampleSum = proto.Float64(h.sum)
-
for upperBound, count := range h.buckets {
buckets = append(buckets, &dto.Bucket{
CumulativeCount: proto.Uint64(count),
@@ -585,7 +1329,7 @@ func (h *constHistogram) Write(out *dto.Metric) error {
// to send it to Prometheus in the Collect method.
//
// buckets is a map of upper bounds to cumulative counts, excluding the +Inf
-// bucket.
+// bucket. The +Inf bucket is implicit, and its value is equal to the provided count.
//
// NewConstHistogram returns an error if the length of labelValues is not
// consistent with the variable labels in Desc or if Desc is invalid.
@@ -599,7 +1343,7 @@ func NewConstHistogram(
if desc.err != nil {
return nil, desc.err
}
- if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
+ if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
return nil, err
}
return &constHistogram{
@@ -627,6 +1371,48 @@ func MustNewConstHistogram(
return m
}
+// NewConstHistogramWithCreatedTimestamp does the same thing as NewConstHistogram but sets the created timestamp.
+func NewConstHistogramWithCreatedTimestamp(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ buckets map[float64]uint64,
+ ct time.Time,
+ labelValues ...string,
+) (Metric, error) {
+ if desc.err != nil {
+ return nil, desc.err
+ }
+ if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
+ return nil, err
+ }
+ return &constHistogram{
+ desc: desc,
+ count: count,
+ sum: sum,
+ buckets: buckets,
+ labelPairs: MakeLabelPairs(desc, labelValues),
+ createdTs: timestamppb.New(ct),
+ }, nil
+}
+
+// MustNewConstHistogramWithCreatedTimestamp is a version of NewConstHistogramWithCreatedTimestamp that panics where
+// NewConstHistogramWithCreatedTimestamp would have returned an error.
+func MustNewConstHistogramWithCreatedTimestamp(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ buckets map[float64]uint64,
+ ct time.Time,
+ labelValues ...string,
+) Metric {
+ m, err := NewConstHistogramWithCreatedTimestamp(desc, count, sum, buckets, ct, labelValues...)
+ if err != nil {
+ panic(err)
+ }
+ return m
+}
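+
+// An illustrative call (desc, counts, and label values are hypothetical):
+//
+//	h := MustNewConstHistogramWithCreatedTimestamp(
+//		desc, 4711, 403.34,
+//		map[float64]uint64{25: 121, 50: 2403, 100: 3221, 200: 4233},
+//		time.Now().Add(-10*time.Minute), // Created timestamp in the past.
+//		"200", "get",
+//	)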
+
type buckSort []*dto.Bucket
func (s buckSort) Len() int {
@@ -640,3 +1426,412 @@ func (s buckSort) Swap(i, j int) {
func (s buckSort) Less(i, j int) bool {
return s[i].GetUpperBound() < s[j].GetUpperBound()
}
+
+// pickSchema returns the smallest number n between -4 and 8 such that
+// 2^(2^-n) is less than or equal to the provided bucketFactor.
+//
+// Special cases:
+// - bucketFactor <= 1: panics.
+// - bucketFactor < 2^(2^-8) (but > 1): still returns 8.
+func pickSchema(bucketFactor float64) int32 {
+ if bucketFactor <= 1 {
+ panic(fmt.Errorf("bucketFactor %f is <=1", bucketFactor))
+ }
+ floor := math.Floor(math.Log2(math.Log2(bucketFactor)))
+ switch {
+ case floor <= -8:
+ return 8
+ case floor >= 4:
+ return -4
+ default:
+ return -int32(floor)
+ }
+}
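+
+// For example (illustrative):
+//
+//	pickSchema(2)   // == 0: 2^(2^-0) == 2, bucket boundaries double each step.
+//	pickSchema(1.1) // == 3: 2^(2^-3) ≈ 1.0905 <= 1.1, while 2^(2^-2) ≈ 1.1892 > 1.1.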
+
+func makeBuckets(buckets *sync.Map) ([]*dto.BucketSpan, []int64) {
+ var ii []int
+ buckets.Range(func(k, v interface{}) bool {
+ ii = append(ii, k.(int))
+ return true
+ })
+ sort.Ints(ii)
+
+ if len(ii) == 0 {
+ return nil, nil
+ }
+
+ var (
+ spans []*dto.BucketSpan
+ deltas []int64
+ prevCount int64
+ nextI int
+ )
+
+ appendDelta := func(count int64) {
+ *spans[len(spans)-1].Length++
+ deltas = append(deltas, count-prevCount)
+ prevCount = count
+ }
+
+ for n, i := range ii {
+ v, _ := buckets.Load(i)
+ count := atomic.LoadInt64(v.(*int64))
+ // Multiple spans with only small gaps in between are probably
+ // encoded more efficiently as one larger span with a few empty
+ // buckets. Needs some research to find the sweet spot. For now,
+ // we assume that gaps of one or two buckets should not create
+ // a new span.
+ iDelta := int32(i - nextI)
+ if n == 0 || iDelta > 2 {
+ // We have to create a new span, either because we are
+ // at the very beginning, or because we have found a gap
+ // of more than two buckets.
+ spans = append(spans, &dto.BucketSpan{
+ Offset: proto.Int32(iDelta),
+ Length: proto.Uint32(0),
+ })
+ } else {
+ // We have found a small gap (or no gap at all).
+ // Insert empty buckets as needed.
+ for j := int32(0); j < iDelta; j++ {
+ appendDelta(0)
+ }
+ }
+ appendDelta(count)
+ nextI = i + 1
+ }
+ return spans, deltas
+}
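+
+// Worked example (illustrative): populated keys {1: 3, 2: 1, 5: 2} yield a
+// single span {Offset: 1, Length: 5} with deltas [3, -2, -1, 0, 2]; the
+// two-bucket gap at keys 3 and 4 is encoded as empty buckets rather than as
+// a new span.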
+
+// addToBucket increments the sparse bucket at key by the provided amount. It
+// returns true if a new sparse bucket had to be created for that.
+func addToBucket(buckets *sync.Map, key int, increment int64) bool {
+ if existingBucket, ok := buckets.Load(key); ok {
+ // Fast path without allocation.
+ atomic.AddInt64(existingBucket.(*int64), increment)
+ return false
+ }
+ // Bucket doesn't exist yet. Slow path allocating new counter.
+ newBucket := increment // TODO(beorn7): Check if this is sufficient to not let increment escape.
+ if actualBucket, loaded := buckets.LoadOrStore(key, &newBucket); loaded {
+ // The bucket was created concurrently in another goroutine.
+ // Have to increment after all.
+ atomic.AddInt64(actualBucket.(*int64), increment)
+ return false
+ }
+ return true
+}
+
+// addAndReset returns a function to be used with sync.Map.Range of sparse
+// buckets in coldCounts. It increments the buckets in the provided hotBuckets
+// according to the buckets ranged through. It then resets all buckets ranged
+// through to 0 (but leaves them in place so that they don't need to get
+// recreated on the next scrape).
+func addAndReset(hotBuckets *sync.Map, bucketNumber *uint32) func(k, v interface{}) bool {
+ return func(k, v interface{}) bool {
+ bucket := v.(*int64)
+ if addToBucket(hotBuckets, k.(int), atomic.LoadInt64(bucket)) {
+ atomic.AddUint32(bucketNumber, 1)
+ }
+ atomic.StoreInt64(bucket, 0)
+ return true
+ }
+}
+
+func deleteSyncMap(m *sync.Map) {
+ m.Range(func(k, v interface{}) bool {
+ m.Delete(k)
+ return true
+ })
+}
+
+func findSmallestKey(m *sync.Map) int {
+ result := math.MaxInt32
+ m.Range(func(k, v interface{}) bool {
+ key := k.(int)
+ if key < result {
+ result = key
+ }
+ return true
+ })
+ return result
+}
+
+func getLe(key int, schema int32) float64 {
+ // Here a bit of context about the behavior for the last bucket counting
+ // regular numbers (called simply "last bucket" below) and the bucket
+ // counting observations of ±Inf (called "inf bucket" below, with a key
+ // one higher than that of the "last bucket"):
+ //
+ // If we apply the usual formula to the last bucket, its upper bound
+ // would be calculated as +Inf. The reason is that the max possible
+ // regular float64 number (math.MaxFloat64) doesn't coincide with one of
+ // the calculated bucket boundaries. So the calculated boundary has to
+ // be larger than math.MaxFloat64, and the only float64 larger than
+ // math.MaxFloat64 is +Inf. However, we want to count actual
+ // observations of ±Inf in the inf bucket. Therefore, we have to treat
+ // the upper bound of the last bucket specially and set it to
+ // math.MaxFloat64. (The upper bound of the inf bucket, with its key
+ // being one higher than that of the last bucket, naturally comes out as
+ // +Inf by the usual formula. So that's fine.)
+ //
+ // math.MaxFloat64 has a frac of 0.9999999999999999 and an exp of
+ // 1024. If there were a float64 number following math.MaxFloat64, it
+ // would have a frac of 1.0 and an exp of 1024, or equivalently a frac
+ // of 0.5 and an exp of 1025. However, since frac must be smaller than
+ // 1, and exp must be smaller than 1025, either representation overflows
+ // a float64. (Which, in turn, is the reason that math.MaxFloat64 is the
+ // largest possible float64. Q.E.D.) However, the formula for
+ // calculating the upper bound from the idx and schema of the last
+ // bucket results in precisely that. It is either frac=1.0 & exp=1024
+ // (for schema < 0) or frac=0.5 & exp=1025 (for schema >=0). (This is,
+ // by the way, a power of two where the exponent itself is a power of
+ // two, 2¹⁰ in fact, which coincides with a bucket boundary in all
+ // schemas.) So these are the special cases we have to catch below.
+ if schema < 0 {
+ exp := key << -schema
+ if exp == 1024 {
+ // This is the last bucket before the overflow bucket
+ // (for ±Inf observations). Return math.MaxFloat64 as
+ // explained above.
+ return math.MaxFloat64
+ }
+ return math.Ldexp(1, exp)
+ }
+
+ fracIdx := key & ((1 << schema) - 1)
+ frac := nativeHistogramBounds[schema][fracIdx]
+ exp := (key >> schema) + 1
+ if frac == 0.5 && exp == 1025 {
+ // This is the last bucket before the overflow bucket (for ±Inf
+ // observations). Return math.MaxFloat64 as explained above.
+ return math.MaxFloat64
+ }
+ return math.Ldexp(frac, exp)
+}
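+
+// For example (illustrative): at schema 0 (factor-2 buckets), getLe(1, 0) == 2
+// and getLe(2, 0) == 4; at schema -1 (factor-4 buckets), getLe(1, -1) == 4.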
+
+// waitForCooldown returns after the count field in the provided histogramCounts
+// has reached the provided count value.
+func waitForCooldown(count uint64, counts *histogramCounts) {
+ for count != atomic.LoadUint64(&counts.count) {
+ runtime.Gosched() // Let observations get work done.
+ }
+}
+
+// atomicAddFloat adds the provided float atomically to another float
+// represented by the bit pattern the bits pointer is pointing to.
+func atomicAddFloat(bits *uint64, v float64) {
+ for {
+ loadedBits := atomic.LoadUint64(bits)
+ newBits := math.Float64bits(math.Float64frombits(loadedBits) + v)
+ if atomic.CompareAndSwapUint64(bits, loadedBits, newBits) {
+ break
+ }
+ }
+}
+
+// atomicDecUint32 atomically decrements the uint32 p points to. See
+// https://pkg.go.dev/sync/atomic#AddUint32 to understand how this is done.
+func atomicDecUint32(p *uint32) {
+ atomic.AddUint32(p, ^uint32(0))
+}
+
+// addAndResetCounts adds certain fields (count, sum, conventional buckets, zero
+// bucket) from the cold counts to the corresponding fields in the hot
+// counts. Those fields are then reset to 0 in the cold counts.
+func addAndResetCounts(hot, cold *histogramCounts) {
+ atomic.AddUint64(&hot.count, atomic.LoadUint64(&cold.count))
+ atomic.StoreUint64(&cold.count, 0)
+ coldSum := math.Float64frombits(atomic.LoadUint64(&cold.sumBits))
+ atomicAddFloat(&hot.sumBits, coldSum)
+ atomic.StoreUint64(&cold.sumBits, 0)
+ for i := range hot.buckets {
+ atomic.AddUint64(&hot.buckets[i], atomic.LoadUint64(&cold.buckets[i]))
+ atomic.StoreUint64(&cold.buckets[i], 0)
+ }
+ atomic.AddUint64(&hot.nativeHistogramZeroBucket, atomic.LoadUint64(&cold.nativeHistogramZeroBucket))
+ atomic.StoreUint64(&cold.nativeHistogramZeroBucket, 0)
+}
+
+type nativeExemplars struct {
+ sync.Mutex
+
+ // Time-to-live for exemplars. It is set to -1 if exemplars are disabled, that is, when NativeHistogramMaxExemplars is below 0.
+ // The ttl is used on insertion to remove an exemplar that is older than ttl, if present.
+ ttl time.Duration
+
+ exemplars []*dto.Exemplar
+}
+
+func (n *nativeExemplars) isEnabled() bool {
+ return n.ttl != -1
+}
+
+func makeNativeExemplars(ttl time.Duration, maxCount int) nativeExemplars {
+ if ttl == 0 {
+ ttl = 5 * time.Minute
+ }
+
+ if maxCount == 0 {
+ maxCount = 10
+ }
+
+ if maxCount < 0 {
+ maxCount = 0
+ ttl = -1
+ }
+
+ return nativeExemplars{
+ ttl: ttl,
+ exemplars: make([]*dto.Exemplar, 0, maxCount),
+ }
+}
+
+func (n *nativeExemplars) addExemplar(e *dto.Exemplar) {
+ if !n.isEnabled() {
+ return
+ }
+
+ n.Lock()
+ defer n.Unlock()
+
+ // As long as the number of exemplars is still below cap(n.exemplars),
+ // insert the new exemplar directly at its sorted position.
+ if len(n.exemplars) < cap(n.exemplars) {
+ var nIdx int
+ for nIdx = 0; nIdx < len(n.exemplars); nIdx++ {
+ if *e.Value < *n.exemplars[nIdx].Value {
+ break
+ }
+ }
+ n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, n.exemplars[nIdx:]...)...)
+ return
+ }
+
+ if len(n.exemplars) == 1 {
+ // If there is only a single exemplar, simply replace it with
+ // the new one.
+ n.exemplars[0] = e
+ return
+ }
+ // From this point on, the number of exemplars is greater than 1.
+
+ // When the number of exemplars exceeds the limit, remove one exemplar.
+ var (
+ ot = time.Time{} // Oldest timestamp seen. Initial value doesn't matter as we replace it due to otIdx == -1 in the loop.
+ otIdx = -1 // Index of the exemplar with the oldest timestamp.
+
+ md = -1.0 // Logarithm of the delta of the closest pair of exemplars.
+
+ // The insertion point of the new exemplar in the exemplars slice after insertion.
+ // This is calculated purely based on the order of the exemplars by value.
+ // nIdx == len(n.exemplars) means the new exemplar is to be inserted after the end.
+ nIdx = -1
+
+ // rIdx is ultimately the index for the exemplar that we are replacing with the new exemplar.
+ // The aim is to keep a good spread of exemplars by value and not let them bunch up too much.
+ // It is calculated in 3 steps:
+ // 1. First we set rIdx to the index of the older exemplar within the closest pair by value.
+ // That is, the following will be true (on a log scale):
+ // either the exemplar pair at indices (rIdx-1, rIdx) or at
+ // (rIdx, rIdx+1) will have the closest values to each other
+ // of all pairs.
+ // For example, suppose the values are distributed like this:
+ // |-----------x-------------x----------------x----x-----|
+ // ^--rIdx as this is older.
+ // Or like this:
+ // |-----------x-------------x----------------x----x-----|
+ // ^--rIdx as this is older.
+ // 2. If there is an exemplar that expired, then we simply reset rIdx to that index.
+ // 3. We check if by inserting the new exemplar we would create a closer pair at
+ // (nIdx-1, nIdx) or (nIdx, nIdx+1) and set rIdx to nIdx-1 or nIdx accordingly to
+ // keep the spread of exemplars by value; otherwise we keep rIdx as it is.
+ rIdx = -1
+ cLog float64 // Logarithm of the current exemplar.
+ pLog float64 // Logarithm of the previous exemplar.
+ )
+
+ for i, exemplar := range n.exemplars {
+ // Find the exemplar with the oldest timestamp.
+ if otIdx == -1 || exemplar.Timestamp.AsTime().Before(ot) {
+ ot = exemplar.Timestamp.AsTime()
+ otIdx = i
+ }
+
+ // Find the index at which to insert the new exemplar.
+ if nIdx == -1 && *e.Value <= *exemplar.Value {
+ nIdx = i
+ }
+
+ // Find the two closest exemplars and pick the one with the older timestamp.
+ pLog = cLog
+ cLog = math.Log(exemplar.GetValue())
+ if i == 0 {
+ continue
+ }
+ diff := math.Abs(cLog - pLog)
+ if md == -1 || diff < md {
+ // The closest exemplar pair is at index: i-1, i.
+ // Choose the exemplar with the older timestamp for replacement.
+ md = diff
+ if n.exemplars[i].Timestamp.AsTime().Before(n.exemplars[i-1].Timestamp.AsTime()) {
+ rIdx = i
+ } else {
+ rIdx = i - 1
+ }
+ }
+ }
+
+ // If all existing exemplars are smaller than the new exemplar,
+ // then the new exemplar is inserted at the end.
+ if nIdx == -1 {
+ nIdx = len(n.exemplars)
+ }
+ // Here, we have the following relationships:
+ // n.exemplars[nIdx-1].Value < e.Value (if nIdx > 0)
+ // e.Value <= n.exemplars[nIdx].Value (if nIdx < len(n.exemplars))
+
+ if otIdx != -1 && e.Timestamp.AsTime().Sub(ot) > n.ttl {
+ // If the oldest exemplar has expired, then replace it with the new exemplar.
+ rIdx = otIdx
+ } else {
+ // In the previous for loop, when calculating the closest pair of exemplars,
+ // we did not take into account the newly inserted exemplar.
+ // So we need to calculate with the newly inserted exemplar again.
+ elog := math.Log(e.GetValue())
+ if nIdx > 0 {
+ diff := math.Abs(elog - math.Log(n.exemplars[nIdx-1].GetValue()))
+ if diff < md {
+ // The value we are about to insert is closer to the previous exemplar at the insertion point than what we calculated before in rIdx.
+ // v--rIdx
+ // |-----------x-n-----------x----------------x----x-----|
+ // nIdx-1--^ ^--new exemplar value
+ // Do not make the spread worse, replace nIdx-1 and not rIdx.
+ md = diff
+ rIdx = nIdx - 1
+ }
+ }
+ if nIdx < len(n.exemplars) {
+ diff := math.Abs(math.Log(n.exemplars[nIdx].GetValue()) - elog)
+ if diff < md {
+ // The value we are about to insert is closer to the next exemplar at the insertion point than what we calculated before in rIdx.
+ // v--rIdx
+ // |-----------x-----------n-x----------------x----x-----|
+ // new exemplar value--^ ^--nIdx
+ // Do not make the spread worse, replace nIdx and not rIdx.
+ rIdx = nIdx
+ }
+ }
+ }
+
+ // Adjust the slice according to rIdx and nIdx.
+ switch {
+ case rIdx == nIdx:
+ n.exemplars[nIdx] = e
+ case rIdx < nIdx:
+ n.exemplars = append(n.exemplars[:rIdx], append(n.exemplars[rIdx+1:nIdx], append([]*dto.Exemplar{e}, n.exemplars[nIdx:]...)...)...)
+ case rIdx > nIdx:
+ n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, append(n.exemplars[nIdx:rIdx], n.exemplars[rIdx+1:]...)...)...)
+ }
+}
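+
+// Worked example (illustrative): suppose the slice is at capacity with values
+// [1, 2, 4, 8] and none of them has expired. Inserting value 3 gives nIdx == 2.
+// On the log scale, 3 is closer to 4 than to 2, so rIdx ends up as nIdx and the
+// exemplar with value 4 is replaced, leaving [1, 2, 3, 8].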
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go
new file mode 100644
index 00000000000..1ed5abe74c1
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go
@@ -0,0 +1,60 @@
+// Copyright (c) 2015 Björn Rabenstein
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+//
+// The code in this package is copy/paste to avoid a dependency. Hence this file
+// carries the copyright of the original repo.
+// https://github.com/beorn7/floats
+package internal
+
+import (
+ "math"
+)
+
+// minNormalFloat64 is the smallest positive normal value of type float64.
+var minNormalFloat64 = math.Float64frombits(0x0010000000000000)
+
+// AlmostEqualFloat64 returns true if a and b are equal within a relative error
+// of epsilon. See http://floating-point-gui.de/errors/comparison/ for the
+// details of the applied method.
+func AlmostEqualFloat64(a, b, epsilon float64) bool {
+ if a == b {
+ return true
+ }
+ absA := math.Abs(a)
+ absB := math.Abs(b)
+ diff := math.Abs(a - b)
+ if a == 0 || b == 0 || absA+absB < minNormalFloat64 {
+ return diff < epsilon*minNormalFloat64
+ }
+ return diff/math.Min(absA+absB, math.MaxFloat64) < epsilon
+}
+
+// AlmostEqualFloat64s is the slice form of AlmostEqualFloat64.
+func AlmostEqualFloat64s(a, b []float64, epsilon float64) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ for i := range a {
+ if !AlmostEqualFloat64(a[i], b[i], epsilon) {
+ return false
+ }
+ }
+ return true
+}
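+
+// For example (illustrative): AlmostEqualFloat64(0.1+0.2, 0.3, 1e-12) is true
+// even though 0.1+0.2 != 0.3 in exact float64 comparison.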
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go
new file mode 100644
index 00000000000..a595a203625
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go
@@ -0,0 +1,654 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file provides tools to compare sequences of strings and generate textual diffs.
+//
+// `GetUnifiedDiffString` is maintained here because the original repository
+// (https://github.com/pmezard/go-difflib) is no longer maintained.
+package internal
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "strings"
+)
+
+func min(a, b int) int {
+ if a < b {
+ return a
+ }
+ return b
+}
+
+func max(a, b int) int {
+ if a > b {
+ return a
+ }
+ return b
+}
+
+func calculateRatio(matches, length int) float64 {
+ if length > 0 {
+ return 2.0 * float64(matches) / float64(length)
+ }
+ return 1.0
+}
+
+type Match struct {
+ A int
+ B int
+ Size int
+}
+
+type OpCode struct {
+ Tag byte
+ I1 int
+ I2 int
+ J1 int
+ J2 int
+}
+
+// SequenceMatcher compares sequence of strings. The basic
+// algorithm predates, and is a little fancier than, an algorithm
+// published in the late 1980's by Ratcliff and Obershelp under the
+// hyperbolic name "gestalt pattern matching". The basic idea is to find
+// the longest contiguous matching subsequence that contains no "junk"
+// elements (R-O doesn't address junk). The same idea is then applied
+// recursively to the pieces of the sequences to the left and to the right
+// of the matching subsequence. This does not yield minimal edit
+// sequences, but does tend to yield matches that "look right" to people.
+//
+// SequenceMatcher tries to compute a "human-friendly diff" between two
+// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the
+// longest *contiguous* & junk-free matching subsequence. That's what
+// catches peoples' eyes. The Windows(tm) windiff has another interesting
+// notion, pairing up elements that appear uniquely in each sequence.
+// That, and the method here, appear to yield more intuitive difference
+// reports than does diff. This method appears to be the least vulnerable
+// to synching up on blocks of "junk lines", though (like blank lines in
+// ordinary text files, or maybe "" lines in HTML files). That may be
+// because this is the only method of the 3 that has a *concept* of
+// "junk" .
+//
+// Timing: Basic R-O is cubic time worst case and quadratic time expected
+// case. SequenceMatcher is quadratic time for the worst case and has
+// expected-case behavior dependent in a complicated way on how many
+// elements the sequences have in common; best case time is linear.
+type SequenceMatcher struct {
+ a []string
+ b []string
+ b2j map[string][]int
+ IsJunk func(string) bool
+ autoJunk bool
+ bJunk map[string]struct{}
+ matchingBlocks []Match
+ fullBCount map[string]int
+ bPopular map[string]struct{}
+ opCodes []OpCode
+}
+
+func NewMatcher(a, b []string) *SequenceMatcher {
+ m := SequenceMatcher{autoJunk: true}
+ m.SetSeqs(a, b)
+ return &m
+}
+
+func NewMatcherWithJunk(a, b []string, autoJunk bool,
+ isJunk func(string) bool,
+) *SequenceMatcher {
+ m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk}
+ m.SetSeqs(a, b)
+ return &m
+}
+
+// Set two sequences to be compared.
+func (m *SequenceMatcher) SetSeqs(a, b []string) {
+ m.SetSeq1(a)
+ m.SetSeq2(b)
+}
+
+// Set the first sequence to be compared. The second sequence to be compared is
+// not changed.
+//
+// SequenceMatcher computes and caches detailed information about the second
+// sequence, so if you want to compare one sequence S against many sequences,
+// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other
+// sequences.
+//
+// See also SetSeqs() and SetSeq2().
+func (m *SequenceMatcher) SetSeq1(a []string) {
+ if &a == &m.a {
+ return
+ }
+ m.a = a
+ m.matchingBlocks = nil
+ m.opCodes = nil
+}
+
+// Set the second sequence to be compared. The first sequence to be compared is
+// not changed.
+func (m *SequenceMatcher) SetSeq2(b []string) {
+ if &b == &m.b {
+ return
+ }
+ m.b = b
+ m.matchingBlocks = nil
+ m.opCodes = nil
+ m.fullBCount = nil
+ m.chainB()
+}
+
+func (m *SequenceMatcher) chainB() {
+ // Populate line -> index mapping
+ b2j := map[string][]int{}
+ for i, s := range m.b {
+ indices := b2j[s]
+ indices = append(indices, i)
+ b2j[s] = indices
+ }
+
+ // Purge junk elements
+ m.bJunk = map[string]struct{}{}
+ if m.IsJunk != nil {
+ junk := m.bJunk
+ for s := range b2j {
+ if m.IsJunk(s) {
+ junk[s] = struct{}{}
+ }
+ }
+ for s := range junk {
+ delete(b2j, s)
+ }
+ }
+
+ // Purge remaining popular elements
+ popular := map[string]struct{}{}
+ n := len(m.b)
+ if m.autoJunk && n >= 200 {
+ ntest := n/100 + 1
+ for s, indices := range b2j {
+ if len(indices) > ntest {
+ popular[s] = struct{}{}
+ }
+ }
+ for s := range popular {
+ delete(b2j, s)
+ }
+ }
+ m.bPopular = popular
+ m.b2j = b2j
+}
+
+func (m *SequenceMatcher) isBJunk(s string) bool {
+ _, ok := m.bJunk[s]
+ return ok
+}
+
+// Find longest matching block in a[alo:ahi] and b[blo:bhi].
+//
+// If IsJunk is not defined:
+//
+// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
+//
+// alo <= i <= i+k <= ahi
+// blo <= j <= j+k <= bhi
+//
+// and for all (i',j',k') meeting those conditions,
+//
+// k >= k'
+// i <= i'
+// and if i == i', j <= j'
+//
+// In other words, of all maximal matching blocks, return one that
+// starts earliest in a, and of all those maximal matching blocks that
+// start earliest in a, return the one that starts earliest in b.
+//
+// If IsJunk is defined, first the longest matching block is
+// determined as above, but with the additional restriction that no
+// junk element appears in the block. Then that block is extended as
+// far as possible by matching (only) junk elements on both sides. So
+// the resulting block never matches on junk except as identical junk
+// happens to be adjacent to an "interesting" match.
+//
+// If no blocks match, return (alo, blo, 0).
+func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match {
+ // CAUTION: stripping common prefix or suffix would be incorrect.
+ // E.g.,
+ // ab
+ // acab
+ // Longest matching block is "ab", but if common prefix is
+ // stripped, it's "a" (tied with "b"). UNIX(tm) diff does so
+ // strip, so ends up claiming that ab is changed to acab by
+ // inserting "ca" in the middle. That's minimal but unintuitive:
+ // "it's obvious" that someone inserted "ac" at the front.
+ // Windiff ends up at the same place as diff, but by pairing up
+ // the unique 'b's and then matching the first two 'a's.
+ besti, bestj, bestsize := alo, blo, 0
+
+ // find longest junk-free match
+ // during an iteration of the loop, j2len[j] = length of longest
+ // junk-free match ending with a[i-1] and b[j]
+ j2len := map[int]int{}
+ for i := alo; i != ahi; i++ {
+ // look at all instances of a[i] in b; note that because
+ // b2j has no junk keys, the loop is skipped if a[i] is junk
+ newj2len := map[int]int{}
+ for _, j := range m.b2j[m.a[i]] {
+ // a[i] matches b[j]
+ if j < blo {
+ continue
+ }
+ if j >= bhi {
+ break
+ }
+ k := j2len[j-1] + 1
+ newj2len[j] = k
+ if k > bestsize {
+ besti, bestj, bestsize = i-k+1, j-k+1, k
+ }
+ }
+ j2len = newj2len
+ }
+
+ // Extend the best by non-junk elements on each end. In particular,
+ // "popular" non-junk elements aren't in b2j, which greatly speeds
+ // the inner loop above, but also means "the best" match so far
+ // doesn't contain any junk *or* popular non-junk elements.
+ for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) &&
+ m.a[besti-1] == m.b[bestj-1] {
+ besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
+ }
+ for besti+bestsize < ahi && bestj+bestsize < bhi &&
+ !m.isBJunk(m.b[bestj+bestsize]) &&
+ m.a[besti+bestsize] == m.b[bestj+bestsize] {
+ bestsize++
+ }
+
+ // Now that we have a wholly interesting match (albeit possibly
+ // empty!), we may as well suck up the matching junk on each
+ // side of it too. Can't think of a good reason not to, and it
+ // saves post-processing the (possibly considerable) expense of
+ // figuring out what to do with it. In the case of an empty
+ // interesting match, this is clearly the right thing to do,
+ // because no other kind of match is possible in the regions.
+ for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) &&
+ m.a[besti-1] == m.b[bestj-1] {
+ besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
+ }
+ for besti+bestsize < ahi && bestj+bestsize < bhi &&
+ m.isBJunk(m.b[bestj+bestsize]) &&
+ m.a[besti+bestsize] == m.b[bestj+bestsize] {
+ bestsize++
+ }
+
+ return Match{A: besti, B: bestj, Size: bestsize}
+}
+
+// Return list of triples describing matching subsequences.
+//
+// Each triple is of the form (i, j, n), and means that
+// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in
+// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are
+// adjacent triples in the list, and the second is not the last triple in the
+// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe
+// adjacent equal blocks.
+//
+// The last triple is a dummy, (len(a), len(b), 0), and is the only
+// triple with n==0.
+func (m *SequenceMatcher) GetMatchingBlocks() []Match {
+ if m.matchingBlocks != nil {
+ return m.matchingBlocks
+ }
+
+ var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match
+ matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match {
+ match := m.findLongestMatch(alo, ahi, blo, bhi)
+ i, j, k := match.A, match.B, match.Size
+ if match.Size > 0 {
+ if alo < i && blo < j {
+ matched = matchBlocks(alo, i, blo, j, matched)
+ }
+ matched = append(matched, match)
+ if i+k < ahi && j+k < bhi {
+ matched = matchBlocks(i+k, ahi, j+k, bhi, matched)
+ }
+ }
+ return matched
+ }
+ matched := matchBlocks(0, len(m.a), 0, len(m.b), nil)
+
+ // It's possible that we have adjacent equal blocks in the
+ // matching_blocks list now.
+ nonAdjacent := []Match{}
+ i1, j1, k1 := 0, 0, 0
+ for _, b := range matched {
+ // Is this block adjacent to i1, j1, k1?
+ i2, j2, k2 := b.A, b.B, b.Size
+ if i1+k1 == i2 && j1+k1 == j2 {
+ // Yes, so collapse them -- this just increases the length of
+ // the first block by the length of the second, and the first
+ // block so lengthened remains the block to compare against.
+ k1 += k2
+ } else {
+ // Not adjacent. Remember the first block (k1==0 means it's
+ // the dummy we started with), and make the second block the
+ // new block to compare against.
+ if k1 > 0 {
+ nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
+ }
+ i1, j1, k1 = i2, j2, k2
+ }
+ }
+ if k1 > 0 {
+ nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
+ }
+
+ nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0})
+ m.matchingBlocks = nonAdjacent
+ return m.matchingBlocks
+}
+
+// Return list of 5-tuples describing how to turn a into b.
+//
+// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple
+// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
+// tuple preceding it, and likewise for j1 == the previous j2.
+//
+// The tags are characters, with these meanings:
+//
+// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2]
+//
+// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case.
+//
+// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case.
+//
+// 'e' (equal): a[i1:i2] == b[j1:j2]
+func (m *SequenceMatcher) GetOpCodes() []OpCode {
+ if m.opCodes != nil {
+ return m.opCodes
+ }
+ i, j := 0, 0
+ matching := m.GetMatchingBlocks()
+ opCodes := make([]OpCode, 0, len(matching))
+ for _, m := range matching {
+ // invariant: we've pumped out correct diffs to change
+ // a[:i] into b[:j], and the next matching block is
+ // a[ai:ai+size] == b[bj:bj+size]. So we need to pump
+ // out a diff to change a[i:ai] into b[j:bj], pump out
+ // the matching block, and move (i,j) beyond the match
+ ai, bj, size := m.A, m.B, m.Size
+ tag := byte(0)
+ if i < ai && j < bj {
+ tag = 'r'
+ } else if i < ai {
+ tag = 'd'
+ } else if j < bj {
+ tag = 'i'
+ }
+ if tag > 0 {
+ opCodes = append(opCodes, OpCode{tag, i, ai, j, bj})
+ }
+ i, j = ai+size, bj+size
+ // the list of matching blocks is terminated by a
+ // sentinel with size 0
+ if size > 0 {
+ opCodes = append(opCodes, OpCode{'e', ai, i, bj, j})
+ }
+ }
+ m.opCodes = opCodes
+ return m.opCodes
+}
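+
+// For example (illustrative): for a = ["a", "b", "c"] and b = ["a", "x", "c"],
+// GetOpCodes returns ('e',0,1,0,1), ('r',1,2,1,2), ('e',2,3,2,3): keep "a",
+// replace "b" with "x", keep "c".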
+
+// Isolate change clusters by eliminating ranges with no changes.
+//
+// Return a generator of groups with up to n lines of context.
+// Each group is in the same format as returned by GetOpCodes().
+func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode {
+ if n < 0 {
+ n = 3
+ }
+ codes := m.GetOpCodes()
+ if len(codes) == 0 {
+ codes = []OpCode{{'e', 0, 1, 0, 1}}
+ }
+ // Fixup leading and trailing groups if they show no changes.
+ if codes[0].Tag == 'e' {
+ c := codes[0]
+ i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+ codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2}
+ }
+ if codes[len(codes)-1].Tag == 'e' {
+ c := codes[len(codes)-1]
+ i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+ codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)}
+ }
+ nn := n + n
+ groups := [][]OpCode{}
+ group := []OpCode{}
+ for _, c := range codes {
+ i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+ // End the current group and start a new one whenever
+ // there is a large range with no changes.
+ if c.Tag == 'e' && i2-i1 > nn {
+ group = append(group, OpCode{
+ c.Tag, i1, min(i2, i1+n),
+ j1, min(j2, j1+n),
+ })
+ groups = append(groups, group)
+ group = []OpCode{}
+ i1, j1 = max(i1, i2-n), max(j1, j2-n)
+ }
+ group = append(group, OpCode{c.Tag, i1, i2, j1, j2})
+ }
+ if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') {
+ groups = append(groups, group)
+ }
+ return groups
+}
+
+// Return a measure of the sequences' similarity (float in [0,1]).
+//
+// Where T is the total number of elements in both sequences, and
+// M is the number of matches, this is 2.0*M / T.
+// Note that this is 1 if the sequences are identical, and 0 if
+// they have nothing in common.
+//
+// .Ratio() is expensive to compute if you haven't already computed
+// .GetMatchingBlocks() or .GetOpCodes(), in which case you may
+// want to try .QuickRatio() or .RealQuickRatio() first to get an
+// upper bound.
+func (m *SequenceMatcher) Ratio() float64 {
+ matches := 0
+ for _, m := range m.GetMatchingBlocks() {
+ matches += m.Size
+ }
+ return calculateRatio(matches, len(m.a)+len(m.b))
+}
+
+// Return an upper bound on ratio() relatively quickly.
+//
+// This isn't defined beyond that it is an upper bound on .Ratio(), and
+// is faster to compute.
+func (m *SequenceMatcher) QuickRatio() float64 {
+ // viewing a and b as multisets, set matches to the cardinality
+ // of their intersection; this counts the number of matches
+ // without regard to order, so is clearly an upper bound
+ if m.fullBCount == nil {
+ m.fullBCount = map[string]int{}
+ for _, s := range m.b {
+ m.fullBCount[s]++
+ }
+ }
+
+ // avail[x] is the number of times x appears in 'b' less the
+ // number of times we've seen it in 'a' so far ... kinda
+ avail := map[string]int{}
+ matches := 0
+ for _, s := range m.a {
+ n, ok := avail[s]
+ if !ok {
+ n = m.fullBCount[s]
+ }
+ avail[s] = n - 1
+ if n > 0 {
+ matches++
+ }
+ }
+ return calculateRatio(matches, len(m.a)+len(m.b))
+}
+
+// Return an upper bound on ratio() very quickly.
+//
+// This isn't defined beyond that it is an upper bound on .Ratio(), and
+// is faster to compute than either .Ratio() or .QuickRatio().
+func (m *SequenceMatcher) RealQuickRatio() float64 {
+ la, lb := len(m.a), len(m.b)
+ return calculateRatio(min(la, lb), la+lb)
+}
+
+// Convert range to the "ed" format
+func formatRangeUnified(start, stop int) string {
+ // Per the diff spec at http://www.unix.org/single_unix_specification/
+ beginning := start + 1 // lines start numbering with one
+ length := stop - start
+ if length == 1 {
+ return fmt.Sprintf("%d", beginning)
+ }
+ if length == 0 {
+ beginning-- // empty ranges begin at line just before the range
+ }
+ return fmt.Sprintf("%d,%d", beginning, length)
+}
+
+// Unified diff parameters
+type UnifiedDiff struct {
+ A []string // First sequence lines
+ FromFile string // First file name
+ FromDate string // First file time
+ B []string // Second sequence lines
+ ToFile string // Second file name
+ ToDate string // Second file time
+ Eol string // Headers end of line, defaults to LF
+ Context int // Number of context lines
+}
+
+// Compare two sequences of lines; generate the delta as a unified diff.
+//
+// Unified diffs are a compact way of showing line changes and a few
+// lines of context. The number of context lines is set by 'n' which
+// defaults to three.
+//
+// By default, the diff control lines (those with ---, +++, or @@) are
+// created with a trailing newline. This is helpful so that inputs
+// created from file.readlines() result in diffs that are suitable for
+// file.writelines() since both the inputs and outputs have trailing
+// newlines.
+//
+// For inputs that do not have trailing newlines, set the lineterm
+// argument to "" so that the output will be uniformly newline free.
+//
+// The unidiff format normally has a header for filenames and modification
+// times. Any or all of these may be specified using strings for
+// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.
+// The modification times are normally expressed in the ISO 8601 format.
+func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error {
+ buf := bufio.NewWriter(writer)
+ defer buf.Flush()
+ wf := func(format string, args ...interface{}) error {
+ _, err := buf.WriteString(fmt.Sprintf(format, args...))
+ return err
+ }
+ ws := func(s string) error {
+ _, err := buf.WriteString(s)
+ return err
+ }
+
+ if len(diff.Eol) == 0 {
+ diff.Eol = "\n"
+ }
+
+ started := false
+ m := NewMatcher(diff.A, diff.B)
+ for _, g := range m.GetGroupedOpCodes(diff.Context) {
+ if !started {
+ started = true
+ fromDate := ""
+ if len(diff.FromDate) > 0 {
+ fromDate = "\t" + diff.FromDate
+ }
+ toDate := ""
+ if len(diff.ToDate) > 0 {
+ toDate = "\t" + diff.ToDate
+ }
+ if diff.FromFile != "" || diff.ToFile != "" {
+ err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol)
+ if err != nil {
+ return err
+ }
+ err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ first, last := g[0], g[len(g)-1]
+ range1 := formatRangeUnified(first.I1, last.I2)
+ range2 := formatRangeUnified(first.J1, last.J2)
+ if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil {
+ return err
+ }
+ for _, c := range g {
+ i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+ if c.Tag == 'e' {
+ for _, line := range diff.A[i1:i2] {
+ if err := ws(" " + line); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if c.Tag == 'r' || c.Tag == 'd' {
+ for _, line := range diff.A[i1:i2] {
+ if err := ws("-" + line); err != nil {
+ return err
+ }
+ }
+ }
+ if c.Tag == 'r' || c.Tag == 'i' {
+ for _, line := range diff.B[j1:j2] {
+ if err := ws("+" + line); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ }
+ return nil
+}
+
+// Like WriteUnifiedDiff but returns the diff as a string.
+func GetUnifiedDiffString(diff UnifiedDiff) (string, error) {
+ w := &bytes.Buffer{}
+ err := WriteUnifiedDiff(w, diff)
+ return w.String(), err
+}
+
+// Split a string on "\n" while preserving the newlines. The output can be used
+// as input for UnifiedDiff and ContextDiff structures.
+func SplitLines(s string) []string {
+ lines := strings.SplitAfter(s, "\n")
+ lines[len(lines)-1] += "\n"
+ return lines
+}
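+
+// An illustrative round trip (inputs are hypothetical):
+//
+//	diff := UnifiedDiff{
+//		A:        SplitLines("one\ntwo\n"),
+//		B:        SplitLines("one\nthree\n"),
+//		FromFile: "want",
+//		ToFile:   "got",
+//		Context:  3,
+//	}
+//	text, _ := GetUnifiedDiffString(diff)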
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go
new file mode 100644
index 00000000000..a4fa6eabd78
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go
@@ -0,0 +1,34 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import "regexp"
+
+type GoCollectorRule struct {
+ Matcher *regexp.Regexp
+ Deny bool
+}
+
+// GoCollectorOptions should not be used directly by anything except the
+// `collectors` package. Use it via the collectors package instead. See issue
+// https://github.com/prometheus/client_golang/issues/1030.
+//
+// This is internal, so external users can only use it via the
+// `collector.WithGoCollector*` methods.
+type GoCollectorOptions struct {
+ DisableMemStatsLikeMetrics bool
+ RuntimeMetricSumForHist map[string]string
+ RuntimeMetricRules []GoCollectorRule
+}
+
+var GoCollectorDefaultRuntimeMetrics = regexp.MustCompile(`/gc/gogc:percent|/gc/gomemlimit:bytes|/sched/gomaxprocs:threads`)
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go
new file mode 100644
index 00000000000..97d17d6cb60
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go
@@ -0,0 +1,142 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build go1.17
+// +build go1.17
+
+package internal
+
+import (
+ "math"
+ "path"
+ "runtime/metrics"
+ "strings"
+
+ "github.com/prometheus/common/model"
+)
+
+// RuntimeMetricsToProm produces a Prometheus metric name from a runtime/metrics
+// metric description and validates whether the metric is suitable for integration
+// with Prometheus.
+//
+// Returns false if a name could not be produced, or if Prometheus does not understand
+// the runtime/metrics Kind.
+//
+// Note that the main reason a name couldn't be produced is if the runtime/metrics
+// package exports a name with characters outside the valid Prometheus metric name
+// character set. This is theoretically possible, but should never happen in practice.
+// Still, don't rely on it.
+func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool) {
+ namespace := "go"
+
+ comp := strings.SplitN(d.Name, ":", 2)
+ key := comp[0]
+ unit := comp[1]
+
+ // The last path element in the key is the name,
+ // the rest is the subsystem.
+ subsystem := path.Dir(key[1:] /* remove leading / */)
+ name := path.Base(key)
+
+ // subsystem is translated by replacing all / and - with _.
+ subsystem = strings.ReplaceAll(subsystem, "/", "_")
+ subsystem = strings.ReplaceAll(subsystem, "-", "_")
+
+ // unit is translated assuming that the unit contains no
+ // non-ASCII characters.
+ unit = strings.ReplaceAll(unit, "-", "_")
+ unit = strings.ReplaceAll(unit, "*", "_")
+ unit = strings.ReplaceAll(unit, "/", "_per_")
+
+ // name has - replaced with _ and is concatenated with the unit and
+ // other data.
+ name = strings.ReplaceAll(name, "-", "_")
+ name += "_" + unit
+ if d.Cumulative && d.Kind != metrics.KindFloat64Histogram {
+ name += "_total"
+ }
+
+ valid := model.IsValidMetricName(model.LabelValue(namespace + "_" + subsystem + "_" + name))
+ switch d.Kind {
+ case metrics.KindUint64:
+ case metrics.KindFloat64:
+ case metrics.KindFloat64Histogram:
+ default:
+ valid = false
+ }
+ return namespace, subsystem, name, valid
+}
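+
+// For example (illustrative): the runtime/metrics name "/gc/heap/allocs:bytes"
+// (a cumulative KindUint64 metric) maps to namespace "go", subsystem "gc_heap",
+// and name "allocs_bytes_total", i.e. go_gc_heap_allocs_bytes_total.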
+
+// RuntimeMetricsBucketsForUnit takes a set of buckets obtained for a runtime/metrics histogram
+// type (so, lower-bound inclusive) and a unit from a runtime/metrics name, and produces
+// a reduced set of buckets. This function always removes any -Inf bucket as it's represented
+// as the bottom-most upper-bound inclusive bucket in Prometheus.
+func RuntimeMetricsBucketsForUnit(buckets []float64, unit string) []float64 {
+ switch unit {
+ case "bytes":
+ // Re-bucket as powers of 2.
+ return reBucketExp(buckets, 2)
+ case "seconds":
+ // Re-bucket as powers of 10 and then merge all buckets greater
+ // than 1 second into the +Inf bucket.
+ b := reBucketExp(buckets, 10)
+ for i := range b {
+ if b[i] <= 1 {
+ continue
+ }
+ b[i] = math.Inf(1)
+ b = b[:i+1]
+ break
+ }
+ return b
+ }
+ return buckets
+}
+
+// reBucketExp takes a list of bucket boundaries (lower bound inclusive) and
+// downsamples the buckets to those a multiple of base apart. The end result
+// is a roughly exponential (in many cases, perfectly exponential) bucketing
+// scheme.
+func reBucketExp(buckets []float64, base float64) []float64 {
+ bucket := buckets[0]
+ var newBuckets []float64
+ // We may see a -Inf here, in which case, add it and skip it
+ // since we risk producing NaNs otherwise.
+ //
+ // We need to preserve -Inf values to maintain runtime/metrics
+ // conventions. We'll strip it out later.
+ if bucket == math.Inf(-1) {
+ newBuckets = append(newBuckets, bucket)
+ buckets = buckets[1:]
+ bucket = buckets[0]
+ }
+ // From now on, bucket should always have a non-Inf value because
+ // Infs are only ever at the ends of the bucket lists, so
+ // arithmetic operations on it are non-NaN.
+ for i := 1; i < len(buckets); i++ {
+ if bucket >= 0 && buckets[i] < bucket*base {
+ // The next bucket we want to include is at least bucket*base.
+ continue
+ } else if bucket < 0 && buckets[i] < bucket/base {
+ // In this case the bucket we're targeting is negative, and since
+ // we're ascending through buckets here, we need to divide to get
+ // closer to zero exponentially.
+ continue
+ }
+ // The +Inf bucket will always be the last one, and we'll always
+ // end up including it here, because +Inf can never be less than
+ // bucket*base, so the skip conditions above never exclude it.
+ newBuckets = append(newBuckets, bucket)
+ bucket = buckets[i]
+ }
+ return append(newBuckets, bucket)
+}
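For orientation, a minimal, self-contained sketch of the downsampling idea used by reBucketExp above; downsampleExp is a hypothetical stand-in (the -Inf and negative-boundary handling is omitted for brevity), not the vendored function:

package main

import "fmt"

// downsampleExp keeps only boundaries that are at least a factor of base
// apart, mirroring the non-negative path of reBucketExp above.
func downsampleExp(buckets []float64, base float64) []float64 {
	bucket := buckets[0]
	var out []float64
	for i := 1; i < len(buckets); i++ {
		if buckets[i] < bucket*base {
			// Still within a factor of base of the kept boundary; skip.
			continue
		}
		out = append(out, bucket)
		bucket = buckets[i]
	}
	return append(out, bucket)
}

func main() {
	in := []float64{1, 2, 3, 4, 6, 8, 12, 16, 24, 32}
	fmt.Println(downsampleExp(in, 2)) // [1 2 4 8 16 32]
}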
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go
index 351c26e1aed..6515c114804 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go
@@ -19,18 +19,34 @@ import (
dto "github.com/prometheus/client_model/go"
)
-// metricSorter is a sortable slice of *dto.Metric.
-type metricSorter []*dto.Metric
+// LabelPairSorter implements sort.Interface. It is used to sort a slice of
+// dto.LabelPair pointers.
+type LabelPairSorter []*dto.LabelPair
-func (s metricSorter) Len() int {
+func (s LabelPairSorter) Len() int {
return len(s)
}
-func (s metricSorter) Swap(i, j int) {
+func (s LabelPairSorter) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
-func (s metricSorter) Less(i, j int) bool {
+func (s LabelPairSorter) Less(i, j int) bool {
+ return s[i].GetName() < s[j].GetName()
+}
+
+// MetricSorter is a sortable slice of *dto.Metric.
+type MetricSorter []*dto.Metric
+
+func (s MetricSorter) Len() int {
+ return len(s)
+}
+
+func (s MetricSorter) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s MetricSorter) Less(i, j int) bool {
if len(s[i].Label) != len(s[j].Label) {
// This should not happen. The metrics are
// inconsistent. However, we have to deal with the fact, as
@@ -68,7 +84,7 @@ func (s metricSorter) Less(i, j int) bool {
// the slice, with the contained Metrics sorted within each MetricFamily.
func NormalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily {
for _, mf := range metricFamiliesByName {
- sort.Sort(metricSorter(mf.Metric))
+ sort.Sort(MetricSorter(mf.Metric))
}
names := make([]string, 0, len(metricFamiliesByName))
for name, mf := range metricFamiliesByName {
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go
index 2744443ac22..c21911f292d 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/labels.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go
@@ -25,12 +25,111 @@ import (
// Labels represents a collection of label name -> value mappings. This type is
// commonly used with the With(Labels) and GetMetricWith(Labels) methods of
// metric vector Collectors, e.g.:
-// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+//
+// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
//
// The other use-case is the specification of constant label pairs in Opts or to
// create a Desc.
type Labels map[string]string
+// LabelConstraint normalizes label values.
+type LabelConstraint func(string) string
+
+// ConstrainedLabel represents a label name and its constraint function
+// to normalize label values. This type is commonly used when constructing
+// metric vector Collectors.
+type ConstrainedLabel struct {
+ Name string
+ Constraint LabelConstraint
+}
+
+// ConstrainableLabels is an interface that allows creating labels that can
+// be optionally constrained.
+//
+// prometheus.V2().NewCounterVec(CounterVecOpts{
+// CounterOpts: {...}, // Usual CounterOpts fields
+// VariableLabels: ConstrainedLabels{
+// {Name: "A"},
+// {Name: "B", Constraint: func(v string) string { ... }},
+// },
+// })
+type ConstrainableLabels interface {
+ compile() *compiledLabels
+ labelNames() []string
+}
+
+// ConstrainedLabels represents a collection of label name -> constraint
+// function mappings used to normalize label values. This type is commonly
+// used when constructing metric vector Collectors.
+type ConstrainedLabels []ConstrainedLabel
+
+func (cls ConstrainedLabels) compile() *compiledLabels {
+ compiled := &compiledLabels{
+ names: make([]string, len(cls)),
+ labelConstraints: map[string]LabelConstraint{},
+ }
+
+ for i, label := range cls {
+ compiled.names[i] = label.Name
+ if label.Constraint != nil {
+ compiled.labelConstraints[label.Name] = label.Constraint
+ }
+ }
+
+ return compiled
+}
+
+func (cls ConstrainedLabels) labelNames() []string {
+ names := make([]string, len(cls))
+ for i, label := range cls {
+ names[i] = label.Name
+ }
+ return names
+}
+
+// UnconstrainedLabels represents a collection of labels without any
+// constraints on their values. Thus, it is simply a collection of label names.
+//
+// UnconstrainedLabels([]string{ "A", "B" })
+//
+// is equivalent to
+//
+// ConstrainedLabels {
+// { Name: "A" },
+// { Name: "B" },
+// }
+type UnconstrainedLabels []string
+
+func (uls UnconstrainedLabels) compile() *compiledLabels {
+ return &compiledLabels{
+ names: uls,
+ }
+}
+
+func (uls UnconstrainedLabels) labelNames() []string {
+ return uls
+}
+
+type compiledLabels struct {
+ names []string
+ labelConstraints map[string]LabelConstraint
+}
+
+func (cls *compiledLabels) compile() *compiledLabels {
+ return cls
+}
+
+func (cls *compiledLabels) labelNames() []string {
+ return cls.names
+}
+
+func (cls *compiledLabels) constrain(labelName, value string) string {
+ if fn, ok := cls.labelConstraints[labelName]; ok && fn != nil {
+ return fn(value)
+ }
+ return value
+}
+
// reservedLabelPrefix is a prefix which is not legal in user-supplied
// label names.
const reservedLabelPrefix = "__"
@@ -39,7 +138,7 @@ var errInconsistentCardinality = errors.New("inconsistent label cardinality")
func makeInconsistentCardinalityError(fqName string, labels, labelValues []string) error {
return fmt.Errorf(
- "%s: %q has %d variable labels named %q but %d values %q were provided",
+ "%w: %q has %d variable labels named %q but %d values %q were provided",
errInconsistentCardinality, fqName,
len(labels), labels,
len(labelValues), labelValues,
@@ -49,7 +148,7 @@ func makeInconsistentCardinalityError(fqName string, labels, labelValues []strin
func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error {
if len(labels) != expectedNumberOfValues {
return fmt.Errorf(
- "%s: expected %d label values but got %d in %#v",
+ "%w: expected %d label values but got %d in %#v",
errInconsistentCardinality, expectedNumberOfValues,
len(labels), labels,
)
@@ -66,8 +165,10 @@ func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error {
func validateLabelValues(vals []string, expectedNumberOfValues int) error {
if len(vals) != expectedNumberOfValues {
+ // The call below makes vals escape, copy them to avoid that.
+ vals := append([]string(nil), vals...)
return fmt.Errorf(
- "%s: expected %d label values but got %d in %#v",
+ "%w: expected %d label values but got %d in %#v",
errInconsistentCardinality, expectedNumberOfValues,
len(vals), vals,
)
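A small sketch of what the constraint machinery above does when a metric is labeled. The constrainedLabel type here is an illustrative stand-in mirroring ConstrainedLabel/compiledLabels from the diff; only the func(string) string constraint shape is assumed:

package main

import (
	"fmt"
	"strings"
)

// constrainedLabel mirrors the ConstrainedLabel shape above: a name plus an
// optional func(string) string used to normalize values.
type constrainedLabel struct {
	name       string
	constraint func(string) string
}

func main() {
	labels := []constrainedLabel{
		{name: "code"}, // unconstrained: values pass through unchanged
		{name: "method", constraint: strings.ToUpper},
	}

	// Apply constraints the way compiledLabels.constrain does: only when a
	// function was registered for the label.
	values := map[string]string{"code": "404", "method": "get"}
	for _, l := range labels {
		v := values[l.name]
		if l.constraint != nil {
			v = l.constraint(v)
		}
		fmt.Printf("%s=%q\n", l.name, v) // code="404", method="GET"
	}
}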
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
index dc121910a52..9d9b81ab448 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
@@ -14,14 +14,15 @@
package prometheus
import (
+ "errors"
+ "math"
+ "sort"
"strings"
"time"
- //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
- "github.com/golang/protobuf/proto"
- "github.com/prometheus/common/model"
-
dto "github.com/prometheus/client_model/go"
+ "github.com/prometheus/common/model"
+ "google.golang.org/protobuf/proto"
)
var separatorByteSlice = []byte{model.SeparatorByte} // For convenient use with xxhash.
@@ -91,6 +92,9 @@ type Opts struct {
// machine_role metric). See also
// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
ConstLabels Labels
+
+ // now is for testing purposes, by default it's time.Now.
+ now func() time.Time
}
// BuildFQName joins the given three name components by "_". Empty name
@@ -115,22 +119,6 @@ func BuildFQName(namespace, subsystem, name string) string {
return name
}
-// labelPairSorter implements sort.Interface. It is used to sort a slice of
-// dto.LabelPair pointers.
-type labelPairSorter []*dto.LabelPair
-
-func (s labelPairSorter) Len() int {
- return len(s)
-}
-
-func (s labelPairSorter) Swap(i, j int) {
- s[i], s[j] = s[j], s[i]
-}
-
-func (s labelPairSorter) Less(i, j int) bool {
- return s[i].GetName() < s[j].GetName()
-}
-
type invalidMetric struct {
desc *Desc
err error
@@ -174,3 +162,96 @@ func (m timestampedMetric) Write(pb *dto.Metric) error {
func NewMetricWithTimestamp(t time.Time, m Metric) Metric {
return timestampedMetric{Metric: m, t: t}
}
+
+type withExemplarsMetric struct {
+ Metric
+
+ exemplars []*dto.Exemplar
+}
+
+func (m *withExemplarsMetric) Write(pb *dto.Metric) error {
+ if err := m.Metric.Write(pb); err != nil {
+ return err
+ }
+
+ switch {
+ case pb.Counter != nil:
+ pb.Counter.Exemplar = m.exemplars[len(m.exemplars)-1]
+ case pb.Histogram != nil:
+ for _, e := range m.exemplars {
+ // pb.Histogram.Bucket are sorted by UpperBound.
+ i := sort.Search(len(pb.Histogram.Bucket), func(i int) bool {
+ return pb.Histogram.Bucket[i].GetUpperBound() >= e.GetValue()
+ })
+ if i < len(pb.Histogram.Bucket) {
+ pb.Histogram.Bucket[i].Exemplar = e
+ } else {
+ // The +Inf bucket should be explicitly added if there is an exemplar for it, similar to non-const histogram logic in https://github.com/prometheus/client_golang/blob/main/prometheus/histogram.go#L357-L365.
+ b := &dto.Bucket{
+ CumulativeCount: proto.Uint64(pb.Histogram.GetSampleCount()),
+ UpperBound: proto.Float64(math.Inf(1)),
+ Exemplar: e,
+ }
+ pb.Histogram.Bucket = append(pb.Histogram.Bucket, b)
+ }
+ }
+ default:
+ // TODO(bwplotka): Implement Gauge?
+ return errors.New("cannot inject exemplar into Gauge, Summary or Untyped")
+ }
+
+ return nil
+}
+
+// Exemplar is an easier-to-use, user-facing representation of *dto.Exemplar.
+type Exemplar struct {
+ Value float64
+ Labels Labels
+ // Optional.
+ // Default value (time.Time{}) indicates it's empty, which should be
+ // understood as time.Now() at the moment of the metric's creation.
+ Timestamp time.Time
+}
+
+// NewMetricWithExemplars returns a new Metric wrapping the provided Metric with given
+// exemplars. Exemplars are validated.
+//
+// Only the last applicable exemplar from the list is injected.
+// For example, for a Counter this means the last exemplar is injected.
+// For a Histogram, the last applicable exemplar for each bucket is injected.
+//
+// NewMetricWithExemplars works best with MustNewConstMetric and
+// MustNewConstHistogram, see example.
+func NewMetricWithExemplars(m Metric, exemplars ...Exemplar) (Metric, error) {
+ if len(exemplars) == 0 {
+ return nil, errors.New("no exemplar was passed for NewMetricWithExemplars")
+ }
+
+ var (
+ now = time.Now()
+ exs = make([]*dto.Exemplar, len(exemplars))
+ err error
+ )
+ for i, e := range exemplars {
+ ts := e.Timestamp
+ if ts.IsZero() {
+ ts = now
+ }
+ exs[i], err = newExemplar(e.Value, ts, e.Labels)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return &withExemplarsMetric{Metric: m, exemplars: exs}, nil
+}
+
+// MustNewMetricWithExemplars is a version of NewMetricWithExemplars that panics where
+// NewMetricWithExemplars would have returned an error.
+func MustNewMetricWithExemplars(m Metric, exemplars ...Exemplar) Metric {
+ ret, err := NewMetricWithExemplars(m, exemplars...)
+ if err != nil {
+ panic(err)
+ }
+ return ret
+}
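A usage sketch for the exemplar API added above, combined with MustNewConstMetric as the doc comment suggests; the metric name and trace_id label are illustrative:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
)

func main() {
	desc := prometheus.NewDesc("http_requests_total", "Total requests.", nil, nil)
	m := prometheus.MustNewConstMetric(desc, prometheus.CounterValue, 42)

	// For counters, only the last exemplar in the list ends up attached.
	em, err := prometheus.NewMetricWithExemplars(m, prometheus.Exemplar{
		Value:  1,
		Labels: prometheus.Labels{"trace_id": "abc123"}, // illustrative
	})
	if err != nil {
		panic(err)
	}

	var pb dto.Metric
	if err := em.Write(&pb); err != nil {
		panic(err)
	}
	fmt.Println(pb.Counter.GetExemplar())
}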
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/num_threads.go b/vendor/github.com/prometheus/client_golang/prometheus/num_threads.go
new file mode 100644
index 00000000000..7c12b210870
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/num_threads.go
@@ -0,0 +1,25 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !js || wasm
+// +build !js wasm
+
+package prometheus
+
+import "runtime"
+
+// getRuntimeNumThreads returns the number of open OS threads.
+func getRuntimeNumThreads() float64 {
+ n, _ := runtime.ThreadCreateProfile(nil)
+ return float64(n)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go b/vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go
new file mode 100644
index 00000000000..7348df01dfb
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go
@@ -0,0 +1,22 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build js && !wasm
+// +build js,!wasm
+
+package prometheus
+
+// getRuntimeNumThreads returns the number of open OS threads.
+func getRuntimeNumThreads() float64 {
+ return 1
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/observer.go b/vendor/github.com/prometheus/client_golang/prometheus/observer.go
index 44128016fd1..03773b21f75 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/observer.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/observer.go
@@ -58,7 +58,7 @@ type ObserverVec interface {
// current time as timestamp, and the provided Labels. Empty Labels will lead to
// a valid (label-less) exemplar. But if Labels is nil, the current exemplar is
// left in place. ObserveWithExemplar panics if any of the provided labels are
-// invalid or if the provided labels contain more than 64 runes in total.
+// invalid or if the provided labels contain more than 128 runes in total.
type ExemplarObserver interface {
ObserveWithExemplar(value float64, exemplar Labels)
}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
index 5bfe0ff5bbc..62a4e7ad9a0 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
@@ -16,21 +16,21 @@ package prometheus
import (
"errors"
"fmt"
- "io/ioutil"
"os"
"strconv"
"strings"
)
type processCollector struct {
- collectFn func(chan<- Metric)
- pidFn func() (int, error)
- reportErrors bool
- cpuTotal *Desc
- openFDs, maxFDs *Desc
- vsize, maxVsize *Desc
- rss *Desc
- startTime *Desc
+ collectFn func(chan<- Metric)
+ pidFn func() (int, error)
+ reportErrors bool
+ cpuTotal *Desc
+ openFDs, maxFDs *Desc
+ vsize, maxVsize *Desc
+ rss *Desc
+ startTime *Desc
+ inBytes, outBytes *Desc
}
// ProcessCollectorOpts defines the behavior of a process metrics collector
@@ -101,11 +101,20 @@ func NewProcessCollector(opts ProcessCollectorOpts) Collector {
"Start time of the process since unix epoch in seconds.",
nil, nil,
),
+ inBytes: NewDesc(
+ ns+"process_network_receive_bytes_total",
+ "Number of bytes received by the process over the network.",
+ nil, nil,
+ ),
+ outBytes: NewDesc(
+ ns+"process_network_transmit_bytes_total",
+ "Number of bytes sent by the process over the network.",
+ nil, nil,
+ ),
}
if opts.PidFn == nil {
- pid := os.Getpid()
- c.pidFn = func() (int, error) { return pid, nil }
+ c.pidFn = getPIDFn()
} else {
c.pidFn = opts.PidFn
}
@@ -131,6 +140,8 @@ func (c *processCollector) Describe(ch chan<- *Desc) {
ch <- c.maxVsize
ch <- c.rss
ch <- c.startTime
+ ch <- c.inBytes
+ ch <- c.outBytes
}
// Collect returns the current state of all metrics of the collector.
@@ -152,13 +163,13 @@ func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error)
// It is meant to be used for the PidFn field in ProcessCollectorOpts.
func NewPidFileFn(pidFilePath string) func() (int, error) {
return func() (int, error) {
- content, err := ioutil.ReadFile(pidFilePath)
+ content, err := os.ReadFile(pidFilePath)
if err != nil {
- return 0, fmt.Errorf("can't read pid file %q: %+v", pidFilePath, err)
+ return 0, fmt.Errorf("can't read pid file %q: %w", pidFilePath, err)
}
pid, err := strconv.Atoi(strings.TrimSpace(string(content)))
if err != nil {
- return 0, fmt.Errorf("can't parse pid file %q: %+v", pidFilePath, err)
+ return 0, fmt.Errorf("can't parse pid file %q: %w", pidFilePath, err)
}
return pid, nil
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_js.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_js.go
new file mode 100644
index 00000000000..b1e363d6cf6
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_js.go
@@ -0,0 +1,26 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build js
+// +build js
+
+package prometheus
+
+func canCollectProcess() bool {
+ return false
+}
+
+func (c *processCollector) processCollect(ch chan<- Metric) {
+ // noop on this platform
+ return
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go
index 3117461cde7..14d56d2d068 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go
@@ -11,7 +11,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// +build !windows
+//go:build !windows && !js && !wasip1
+// +build !windows,!js,!wasip1
package prometheus
@@ -62,4 +63,18 @@ func (c *processCollector) processCollect(ch chan<- Metric) {
} else {
c.reportError(ch, nil, err)
}
+
+ if netstat, err := p.Netstat(); err == nil {
+ var inOctets, outOctets float64
+ if netstat.IpExt.InOctets != nil {
+ inOctets = *netstat.IpExt.InOctets
+ }
+ if netstat.IpExt.OutOctets != nil {
+ outOctets = *netstat.IpExt.OutOctets
+ }
+ ch <- MustNewConstMetric(c.inBytes, CounterValue, inOctets)
+ ch <- MustNewConstMetric(c.outBytes, CounterValue, outOctets)
+ } else {
+ c.reportError(ch, nil, err)
+ }
}
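A usage sketch for the process collector with the network metrics added above; since the new series come from procfs netstat data, they are only expected on Linux builds:

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}))

	// On Linux this now also exposes process_network_receive_bytes_total
	// and process_network_transmit_bytes_total, sourced from procfs.
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}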
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go
new file mode 100644
index 00000000000..d8d9a6d7a2f
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go
@@ -0,0 +1,26 @@
+// Copyright 2023 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build wasip1
+// +build wasip1
+
+package prometheus
+
+func canCollectProcess() bool {
+ return false
+}
+
+func (*processCollector) processCollect(chan<- Metric) {
+ // noop on this platform
+ return
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
index e7c0d05464f..315eab5f179 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
@@ -76,16 +76,25 @@ func (r *responseWriterDelegator) Write(b []byte) (int, error) {
return n, err
}
-type closeNotifierDelegator struct{ *responseWriterDelegator }
-type flusherDelegator struct{ *responseWriterDelegator }
-type hijackerDelegator struct{ *responseWriterDelegator }
-type readerFromDelegator struct{ *responseWriterDelegator }
-type pusherDelegator struct{ *responseWriterDelegator }
+// Unwrap lets http.ResponseController get the underlying http.ResponseWriter
+// by implementing the [rwUnwrapper](https://cs.opensource.google/go/go/+/refs/tags/go1.21.4:src/net/http/responsecontroller.go;l=42-44) interface.
+func (r *responseWriterDelegator) Unwrap() http.ResponseWriter {
+ return r.ResponseWriter
+}
+
+type (
+ closeNotifierDelegator struct{ *responseWriterDelegator }
+ flusherDelegator struct{ *responseWriterDelegator }
+ hijackerDelegator struct{ *responseWriterDelegator }
+ readerFromDelegator struct{ *responseWriterDelegator }
+ pusherDelegator struct{ *responseWriterDelegator }
+)
func (d closeNotifierDelegator) CloseNotify() <-chan bool {
//nolint:staticcheck // Ignore SA1019. http.CloseNotifier is deprecated but we keep it here to not break existing users.
return d.ResponseWriter.(http.CloseNotifier).CloseNotify()
}
+
func (d flusherDelegator) Flush() {
// If applicable, call WriteHeader here so that observeWriteHeader is
// handled appropriately.
@@ -94,9 +103,11 @@ func (d flusherDelegator) Flush() {
}
d.ResponseWriter.(http.Flusher).Flush()
}
+
func (d hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
return d.ResponseWriter.(http.Hijacker).Hijack()
}
+
func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) {
// If applicable, call WriteHeader here so that observeWriteHeader is
// handled appropriately.
@@ -107,6 +118,7 @@ func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) {
d.written += n
return n, err
}
+
func (d pusherDelegator) Push(target string, opts *http.PushOptions) error {
return d.ResponseWriter.(http.Pusher).Push(target, opts)
}
@@ -261,7 +273,7 @@ func init() {
http.Flusher
}{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
}
- pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23
+ pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 23
return struct {
*responseWriterDelegator
http.Pusher
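The Unwrap method above exists so Go 1.20's http.ResponseController can reach the wrapped writer. A minimal sketch of that mechanism with a hypothetical wrapper (not the vendored delegator), assuming Go 1.20+:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// wrapper mirrors the delegator pattern: it wraps a ResponseWriter and
// exposes the original via Unwrap so http.ResponseController can find the
// real Flusher/Hijacker behind it.
type wrapper struct {
	http.ResponseWriter
	written int
}

func (w *wrapper) Write(b []byte) (int, error) {
	n, err := w.ResponseWriter.Write(b)
	w.written += n
	return n, err
}

func (w *wrapper) Unwrap() http.ResponseWriter { return w.ResponseWriter }

func main() {
	h := http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
		rc := http.NewResponseController(rw)
		fmt.Fprintln(rw, "hello")
		// Flush works even though rw is our wrapper, because
		// NewResponseController unwraps it to find the real Flusher.
		if err := rc.Flush(); err != nil {
			http.Error(rw, err.Error(), http.StatusInternalServerError)
		}
	})

	rec := httptest.NewRecorder()
	w := &wrapper{ResponseWriter: rec}
	h.ServeHTTP(w, httptest.NewRequest(http.MethodGet, "/", nil))
	fmt.Println(rec.Flushed, w.written) // true 6
}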
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
index d86d0cf4b0e..e598e66e688 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
@@ -33,24 +33,40 @@ package promhttp
import (
"compress/gzip"
+ "errors"
"fmt"
"io"
"net/http"
- "strings"
+ "strconv"
"sync"
"time"
+ "github.com/klauspost/compress/zstd"
"github.com/prometheus/common/expfmt"
+ "github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil"
"github.com/prometheus/client_golang/prometheus"
)
const (
- contentTypeHeader = "Content-Type"
- contentEncodingHeader = "Content-Encoding"
- acceptEncodingHeader = "Accept-Encoding"
+ contentTypeHeader = "Content-Type"
+ contentEncodingHeader = "Content-Encoding"
+ acceptEncodingHeader = "Accept-Encoding"
+ processStartTimeHeader = "Process-Start-Time-Unix"
)
+// Compression represents the content encodings that handlers support for
+// HTTP responses.
+type Compression string
+
+const (
+ Identity Compression = "identity"
+ Gzip Compression = "gzip"
+ Zstd Compression = "zstd"
+)
+
+var defaultCompressionFormats = []Compression{Identity, Gzip, Zstd}
+
var gzipPool = sync.Pool{
New: func() interface{} {
return gzip.NewWriter(nil)
@@ -84,6 +100,13 @@ func Handler() http.Handler {
// instrumentation. Use the InstrumentMetricHandler function to apply the same
// kind of instrumentation as it is used by the Handler function.
func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
+ return HandlerForTransactional(prometheus.ToTransactionalGatherer(reg), opts)
+}
+
+// HandlerForTransactional is like HandlerFor, but it uses a transactional
+// gatherer, whose returned *dto.MetricFamily values may safely be modified
+// in place between the call to `Gather` and the call to that Gather's `done`.
+func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerOpts) http.Handler {
var (
inFlightSem chan struct{}
errCnt = prometheus.NewCounterVec(
@@ -103,7 +126,8 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
errCnt.WithLabelValues("gathering")
errCnt.WithLabelValues("encoding")
if err := opts.Registry.Register(errCnt); err != nil {
- if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
+ are := &prometheus.AlreadyRegisteredError{}
+ if errors.As(err, are) {
errCnt = are.ExistingCollector.(*prometheus.CounterVec)
} else {
panic(err)
@@ -111,7 +135,22 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
}
}
+ // Select compression formats to offer based on default or user choice.
+ var compressions []string
+ if !opts.DisableCompression {
+ offers := defaultCompressionFormats
+ if len(opts.OfferedCompressions) > 0 {
+ offers = opts.OfferedCompressions
+ }
+ for _, comp := range offers {
+ compressions = append(compressions, string(comp))
+ }
+ }
+
h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) {
+ if !opts.ProcessStartTime.IsZero() {
+ rsp.Header().Set(processStartTimeHeader, strconv.FormatInt(opts.ProcessStartTime.Unix(), 10))
+ }
if inFlightSem != nil {
select {
case inFlightSem <- struct{}{}: // All good, carry on.
@@ -123,7 +162,8 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
return
}
}
- mfs, err := reg.Gather()
+ mfs, done, err := reg.Gather()
+ defer done()
if err != nil {
if opts.ErrorLog != nil {
opts.ErrorLog.Println("error gathering metrics:", err)
@@ -150,21 +190,23 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
} else {
contentType = expfmt.Negotiate(req.Header)
}
- header := rsp.Header()
- header.Set(contentTypeHeader, string(contentType))
+ rsp.Header().Set(contentTypeHeader, string(contentType))
- w := io.Writer(rsp)
- if !opts.DisableCompression && gzipAccepted(req.Header) {
- header.Set(contentEncodingHeader, "gzip")
- gz := gzipPool.Get().(*gzip.Writer)
- defer gzipPool.Put(gz)
+ w, encodingHeader, closeWriter, err := negotiateEncodingWriter(req, rsp, compressions)
+ if err != nil {
+ if opts.ErrorLog != nil {
+ opts.ErrorLog.Println("error getting writer", err)
+ }
+ w = io.Writer(rsp)
+ encodingHeader = string(Identity)
+ }
- gz.Reset(w)
- defer gz.Close()
+ defer closeWriter()
- w = gz
+ // Set Content-Encoding only when data is compressed
+ if encodingHeader != string(Identity) {
+ rsp.Header().Set(contentEncodingHeader, encodingHeader)
}
-
enc := expfmt.NewEncoder(w, contentType)
// handleError handles the error according to opts.ErrorHandling
@@ -242,7 +284,8 @@ func InstrumentMetricHandler(reg prometheus.Registerer, handler http.Handler) ht
cnt.WithLabelValues("500")
cnt.WithLabelValues("503")
if err := reg.Register(cnt); err != nil {
- if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
+ are := &prometheus.AlreadyRegisteredError{}
+ if errors.As(err, are) {
cnt = are.ExistingCollector.(*prometheus.CounterVec)
} else {
panic(err)
@@ -254,7 +297,8 @@ func InstrumentMetricHandler(reg prometheus.Registerer, handler http.Handler) ht
Help: "Current number of scrapes being served.",
})
if err := reg.Register(gge); err != nil {
- if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
+ are := &prometheus.AlreadyRegisteredError{}
+ if errors.As(err, are) {
gge = are.ExistingCollector.(prometheus.Gauge)
} else {
panic(err)
@@ -326,9 +370,19 @@ type HandlerOpts struct {
// no effect on the HTTP status code because ErrorHandling is set to
// ContinueOnError.
Registry prometheus.Registerer
- // If DisableCompression is true, the handler will never compress the
- // response, even if requested by the client.
+ // DisableCompression disables the response encoding (compression) and
+ // encoding negotiation. If true, the handler will never compress the
+ // response, even if requested by the client and even if the
+ // OfferedCompressions field is set.
DisableCompression bool
+ // OfferedCompressions is a set of encodings (compressions) the handler
+ // will try to offer when negotiating with the client. This defaults to
+ // identity, gzip and zstd.
+ // NOTE: If the handler can't agree with the client on an encoding, or if
+ // unsupported or empty encodings are set in OfferedCompressions, the
+ // handler always falls back to no compression (identity) for
+ // compatibility reasons. In such cases, ErrorLog will be used if set.
+ OfferedCompressions []Compression
// The number of concurrent HTTP requests is limited to
// MaxRequestsInFlight. Additional requests are responded to with 503
// Service Unavailable and a suitable message in the body. If
@@ -354,19 +408,14 @@ type HandlerOpts struct {
// (which changes the identity of the resulting series on the Prometheus
// server).
EnableOpenMetrics bool
-}
-
-// gzipAccepted returns whether the client will accept gzip-encoded content.
-func gzipAccepted(header http.Header) bool {
- a := header.Get(acceptEncodingHeader)
- parts := strings.Split(a, ",")
- for _, part := range parts {
- part = strings.TrimSpace(part)
- if part == "gzip" || strings.HasPrefix(part, "gzip;") {
- return true
- }
- }
- return false
+ // ProcessStartTime allows setting a process start time value that will be
+ // exposed with the "Process-Start-Time-Unix" response header along with
+ // the metrics payload. This allows callers to perform efficient
+ // transformations to cumulative counters (e.g. OpenTelemetry) or,
+ // generally, _created timestamp estimation per scrape target.
+ // NOTE: This feature is experimental and not covered by OpenMetrics or Prometheus
+ // exposition format.
+ ProcessStartTime time.Time
}
// httpError removes any content-encoding header and then calls http.Error with
@@ -381,3 +430,38 @@ func httpError(rsp http.ResponseWriter, err error) {
http.StatusInternalServerError,
)
}
+
+// negotiateEncodingWriter reads the Accept-Encoding header from a request and
+// selects the right compression based on an allow-list of supported
+// compressions. It returns a writer implementing the compression and the
+// correct value that the caller can set in the Content-Encoding response
+// header.
+func negotiateEncodingWriter(r *http.Request, rw io.Writer, compressions []string) (_ io.Writer, encodingHeaderValue string, closeWriter func(), _ error) {
+ if len(compressions) == 0 {
+ return rw, string(Identity), func() {}, nil
+ }
+
+ // TODO(mrueg): Replace internal/github.com/gddo once https://github.com/golang/go/issues/19307 is implemented.
+ selected := httputil.NegotiateContentEncoding(r, compressions)
+
+ switch selected {
+ case "zstd":
+ // TODO(mrueg): Replace klauspost/compress with stdlib implementation once https://github.com/golang/go/issues/62513 is implemented.
+ z, err := zstd.NewWriter(rw, zstd.WithEncoderLevel(zstd.SpeedFastest))
+ if err != nil {
+ return nil, "", func() {}, err
+ }
+
+ z.Reset(rw)
+ return z, selected, func() { _ = z.Close() }, nil
+ case "gzip":
+ gz := gzipPool.Get().(*gzip.Writer)
+ gz.Reset(rw)
+ return gz, selected, func() { _ = gz.Close(); gzipPool.Put(gz) }, nil
+ case "identity":
+ // This means the content is not compressed.
+ return rw, selected, func() {}, nil
+ default:
+ // The selected content encoding is not implemented.
+ return nil, "", func() {}, fmt.Errorf("content compression format not recognized: %s. Valid formats are: %s", selected, defaultCompressionFormats)
+ }
+}
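A usage sketch for the compression negotiation options added above, assuming a client_golang version that includes this change; the registry setup and port are illustrative:

package main

import (
	"log"
	"net/http"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()

	// Offer only gzip and identity (dropping zstd), and expose the
	// Process-Start-Time-Unix header described above.
	handler := promhttp.HandlerFor(reg, promhttp.HandlerOpts{
		OfferedCompressions: []promhttp.Compression{promhttp.Gzip, promhttp.Identity},
		ProcessStartTime:    time.Now(), // normally the real process start time
	})

	http.Handle("/metrics", handler)
	log.Fatal(http.ListenAndServe(":8080", nil))
}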
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
index 861b4d21cac..d3482c40ca7 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
@@ -38,11 +38,11 @@ func (rt RoundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) {
//
// See the example for ExampleInstrumentRoundTripperDuration for example usage.
func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripper) RoundTripperFunc {
- return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+ return func(r *http.Request) (*http.Response, error) {
gauge.Inc()
defer gauge.Dec()
return next.RoundTrip(r)
- })
+ }
}
// InstrumentRoundTripperCounter is a middleware that wraps the provided
@@ -59,22 +59,29 @@ func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripp
// If the wrapped RoundTripper panics or returns a non-nil error, the Counter
// is not incremented.
//
+// Use with WithExemplarFromContext to instrument the exemplars on the counter of requests.
+//
// See the example for ExampleInstrumentRoundTripperDuration for example usage.
func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper, opts ...Option) RoundTripperFunc {
- rtOpts := &option{}
+ rtOpts := defaultOptions()
for _, o := range opts {
- o(rtOpts)
+ o.apply(rtOpts)
}
- code, method := checkLabels(counter)
+ // Curry the counter with dynamic labels before checking the remaining labels.
+ code, method := checkLabels(counter.MustCurryWith(rtOpts.emptyDynamicLabels()))
- return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+ return func(r *http.Request) (*http.Response, error) {
resp, err := next.RoundTrip(r)
if err == nil {
- counter.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)).Inc()
+ l := labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)
+ for label, resolve := range rtOpts.extraLabelsFromCtx {
+ l[label] = resolve(resp.Request.Context())
+ }
+ addWithExemplar(counter.With(l), 1, rtOpts.getExemplarFn(r.Context()))
}
return resp, err
- })
+ }
}
// InstrumentRoundTripperDuration is a middleware that wraps the provided
@@ -94,24 +101,31 @@ func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.Rou
// If the wrapped RoundTripper panics or returns a non-nil error, no values are
// reported.
//
+// Use with WithExemplarFromContext to instrument the exemplars on the duration histograms.
+//
// Note that this method is only guaranteed to never observe negative durations
// if used with Go1.9+.
func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper, opts ...Option) RoundTripperFunc {
- rtOpts := &option{}
+ rtOpts := defaultOptions()
for _, o := range opts {
- o(rtOpts)
+ o.apply(rtOpts)
}
- code, method := checkLabels(obs)
+ // Curry the observer with dynamic labels before checking the remaining labels.
+ code, method := checkLabels(obs.MustCurryWith(rtOpts.emptyDynamicLabels()))
- return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+ return func(r *http.Request) (*http.Response, error) {
start := time.Now()
resp, err := next.RoundTrip(r)
if err == nil {
- obs.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)).Observe(time.Since(start).Seconds())
+ l := labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)
+ for label, resolve := range rtOpts.extraLabelsFromCtx {
+ l[label] = resolve(resp.Request.Context())
+ }
+ observeWithExemplar(obs.With(l), time.Since(start).Seconds(), rtOpts.getExemplarFn(r.Context()))
}
return resp, err
- })
+ }
}
// InstrumentTrace is used to offer flexibility in instrumenting the available
@@ -149,7 +163,7 @@ type InstrumentTrace struct {
//
// See the example for ExampleInstrumentRoundTripperDuration for example usage.
func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc {
- return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+ return func(r *http.Request) (*http.Response, error) {
start := time.Now()
trace := &httptrace.ClientTrace{
@@ -231,5 +245,5 @@ func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) Ro
r = r.WithContext(httptrace.WithClientTrace(r.Context(), trace))
return next.RoundTrip(r)
- })
+ }
}
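A usage sketch for the round-tripper middleware touched above; the metric name and target URL are illustrative:

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	counter := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "client_api_requests_total",
			Help: "Outgoing API requests by code and method.",
		},
		[]string{"code", "method"},
	)
	prometheus.MustRegister(counter)

	// Wrap the default transport; the counter is incremented per response,
	// partitioned by status code and HTTP method.
	client := &http.Client{
		Transport: promhttp.InstrumentRoundTripperCounter(counter, http.DefaultTransport),
	}

	if _, err := client.Get("https://example.com"); err != nil {
		log.Println(err)
	}
}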
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
index a23f0edc6f8..356edb7868c 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
@@ -28,6 +28,26 @@ import (
// magicString is used for the hacky label test in checkLabels. Remove once fixed.
const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa"
+// observeWithExemplar is a wrapper for [prometheus.ExemplarObserver.ObserveWithExemplar],
+// which falls back to [prometheus.Observer.Observe] if no labels are provided.
+func observeWithExemplar(obs prometheus.Observer, val float64, labels map[string]string) {
+ if labels == nil {
+ obs.Observe(val)
+ return
+ }
+ obs.(prometheus.ExemplarObserver).ObserveWithExemplar(val, labels)
+}
+
+// addWithExemplar is a wrapper for [prometheus.ExemplarAdder.AddWithExemplar],
+// which falls back to [prometheus.Counter.Add] if no labels are provided.
+func addWithExemplar(obs prometheus.Counter, val float64, labels map[string]string) {
+ if labels == nil {
+ obs.Add(val)
+ return
+ }
+ obs.(prometheus.ExemplarAdder).AddWithExemplar(val, labels)
+}
+
// InstrumentHandlerInFlight is a middleware that wraps the provided
// http.Handler. It sets the provided prometheus.Gauge to the number of
// requests currently handled by the wrapped http.Handler.
@@ -48,7 +68,7 @@ func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handl
// names are "code" and "method". The function panics otherwise. For the "method"
// label a predefined default label value set is used to filter given values.
// Values besides predefined values will count as `unknown` method.
-//`WithExtraMethods` can be used to add more methods to the set. The Observe
+// `WithExtraMethods` can be used to add more methods to the set. The Observe
// method of the Observer in the ObserverVec is called with the request duration
// in seconds. Partitioning happens by HTTP status code and/or HTTP method if
// the respective instance label names are present in the ObserverVec. For
@@ -62,28 +82,37 @@ func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handl
// Note that this method is only guaranteed to never observe negative durations
// if used with Go1.9+.
func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc {
- mwOpts := &option{}
+ hOpts := defaultOptions()
for _, o := range opts {
- o(mwOpts)
+ o.apply(hOpts)
}
- code, method := checkLabels(obs)
+ // Curry the observer with dynamic labels before checking the remaining labels.
+ code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels()))
if code {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ return func(w http.ResponseWriter, r *http.Request) {
now := time.Now()
d := newDelegator(w, nil)
next.ServeHTTP(d, r)
- obs.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Observe(time.Since(now).Seconds())
- })
+ l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)
+ for label, resolve := range hOpts.extraLabelsFromCtx {
+ l[label] = resolve(r.Context())
+ }
+ observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context()))
+ }
}
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ return func(w http.ResponseWriter, r *http.Request) {
now := time.Now()
next.ServeHTTP(w, r)
- obs.With(labels(code, method, r.Method, 0, mwOpts.extraMethods...)).Observe(time.Since(now).Seconds())
- })
+ l := labels(code, method, r.Method, 0, hOpts.extraMethods...)
+ for label, resolve := range hOpts.extraLabelsFromCtx {
+ l[label] = resolve(r.Context())
+ }
+ observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context()))
+ }
}
// InstrumentHandlerCounter is a middleware that wraps the provided http.Handler
@@ -104,25 +133,36 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, op
//
// See the example for InstrumentHandlerDuration for example usage.
func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler, opts ...Option) http.HandlerFunc {
- mwOpts := &option{}
+ hOpts := defaultOptions()
for _, o := range opts {
- o(mwOpts)
+ o.apply(hOpts)
}
- code, method := checkLabels(counter)
+ // Curry the counter with dynamic labels before checking the remaining labels.
+ code, method := checkLabels(counter.MustCurryWith(hOpts.emptyDynamicLabels()))
if code {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ return func(w http.ResponseWriter, r *http.Request) {
d := newDelegator(w, nil)
next.ServeHTTP(d, r)
- counter.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Inc()
- })
+
+ l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)
+ for label, resolve := range hOpts.extraLabelsFromCtx {
+ l[label] = resolve(r.Context())
+ }
+ addWithExemplar(counter.With(l), 1, hOpts.getExemplarFn(r.Context()))
+ }
}
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ return func(w http.ResponseWriter, r *http.Request) {
next.ServeHTTP(w, r)
- counter.With(labels(code, method, r.Method, 0, mwOpts.extraMethods...)).Inc()
- })
+
+ l := labels(code, method, r.Method, 0, hOpts.extraMethods...)
+ for label, resolve := range hOpts.extraLabelsFromCtx {
+ l[label] = resolve(r.Context())
+ }
+ addWithExemplar(counter.With(l), 1, hOpts.getExemplarFn(r.Context()))
+ }
}
// InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided
@@ -148,20 +188,25 @@ func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler,
//
// See the example for InstrumentHandlerDuration for example usage.
func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc {
- mwOpts := &option{}
+ hOpts := defaultOptions()
for _, o := range opts {
- o(mwOpts)
+ o.apply(hOpts)
}
- code, method := checkLabels(obs)
+ // Curry the observer with dynamic labels before checking the remaining labels.
+ code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels()))
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ return func(w http.ResponseWriter, r *http.Request) {
now := time.Now()
d := newDelegator(w, func(status int) {
- obs.With(labels(code, method, r.Method, status, mwOpts.extraMethods...)).Observe(time.Since(now).Seconds())
+ l := labels(code, method, r.Method, status, hOpts.extraMethods...)
+ for label, resolve := range hOpts.extraLabelsFromCtx {
+ l[label] = resolve(r.Context())
+ }
+ observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context()))
})
next.ServeHTTP(d, r)
- })
+ }
}
// InstrumentHandlerRequestSize is a middleware that wraps the provided
@@ -184,27 +229,38 @@ func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Ha
//
// See the example for InstrumentHandlerDuration for example usage.
func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc {
- mwOpts := &option{}
+ hOpts := defaultOptions()
for _, o := range opts {
- o(mwOpts)
+ o.apply(hOpts)
}
- code, method := checkLabels(obs)
+ // Curry the observer with dynamic labels before checking the remaining labels.
+ code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels()))
if code {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ return func(w http.ResponseWriter, r *http.Request) {
d := newDelegator(w, nil)
next.ServeHTTP(d, r)
size := computeApproximateRequestSize(r)
- obs.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Observe(float64(size))
- })
+
+ l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)
+ for label, resolve := range hOpts.extraLabelsFromCtx {
+ l[label] = resolve(r.Context())
+ }
+ observeWithExemplar(obs.With(l), float64(size), hOpts.getExemplarFn(r.Context()))
+ }
}
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ return func(w http.ResponseWriter, r *http.Request) {
next.ServeHTTP(w, r)
size := computeApproximateRequestSize(r)
- obs.With(labels(code, method, r.Method, 0, mwOpts.extraMethods...)).Observe(float64(size))
- })
+
+ l := labels(code, method, r.Method, 0, hOpts.extraMethods...)
+ for label, resolve := range hOpts.extraLabelsFromCtx {
+ l[label] = resolve(r.Context())
+ }
+ observeWithExemplar(obs.With(l), float64(size), hOpts.getExemplarFn(r.Context()))
+ }
}
// InstrumentHandlerResponseSize is a middleware that wraps the provided
@@ -227,17 +283,23 @@ func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler,
//
// See the example for InstrumentHandlerDuration for example usage.
func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.Handler {
- mwOpts := &option{}
+ hOpts := defaultOptions()
for _, o := range opts {
- o(mwOpts)
+ o.apply(hOpts)
}
- code, method := checkLabels(obs)
+ // Curry the observer with dynamic labels before checking the remaining labels.
+ code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels()))
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
d := newDelegator(w, nil)
next.ServeHTTP(d, r)
- obs.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Observe(float64(d.Written()))
+
+ l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)
+ for label, resolve := range hOpts.extraLabelsFromCtx {
+ l[label] = resolve(r.Context())
+ }
+ observeWithExemplar(obs.With(l), float64(d.Written()), hOpts.getExemplarFn(r.Context()))
})
}
@@ -246,7 +308,7 @@ func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler
// Collector does not have a Desc or has more than one Desc or its Desc is
// invalid. It also panics if the Collector has any non-const, non-curried
// labels that are not named "code" or "method".
-func checkLabels(c prometheus.Collector) (code bool, method bool) {
+func checkLabels(c prometheus.Collector) (code, method bool) {
// TODO(beorn7): Remove this hacky way to check for instance labels
// once Descriptors can have their dimensionality queried.
var (
@@ -327,15 +389,12 @@ func isLabelCurried(c prometheus.Collector, label string) bool {
return true
}
-// emptyLabels is a one-time allocation for non-partitioned metrics to avoid
-// unnecessary allocations on each request.
-var emptyLabels = prometheus.Labels{}
-
func labels(code, method bool, reqMethod string, status int, extraMethods ...string) prometheus.Labels {
+ labels := prometheus.Labels{}
+
if !(code || method) {
- return emptyLabels
+ return labels
}
- labels := prometheus.Labels{}
if code {
labels["code"] = sanitizeCode(status)
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go
index 35e41bd1e6b..5d4383aa14a 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go
@@ -13,19 +13,72 @@
package promhttp
-// Option are used to configure a middleware or round tripper..
-type Option func(*option)
+import (
+ "context"
-type option struct {
- extraMethods []string
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+// Option is used to configure either a handler (middleware) or a round tripper.
+type Option interface {
+ apply(*options)
+}
+
+// LabelValueFromCtx is used to compute a label value from the request
+// context. The context can be filled with values from the request through
+// middleware.
+type LabelValueFromCtx func(ctx context.Context) string
+
+// options stores options for either a handler or a round tripper.
+type options struct {
+ extraMethods []string
+ getExemplarFn func(requestCtx context.Context) prometheus.Labels
+ extraLabelsFromCtx map[string]LabelValueFromCtx
+}
+
+func defaultOptions() *options {
+ return &options{
+ getExemplarFn: func(ctx context.Context) prometheus.Labels { return nil },
+ extraLabelsFromCtx: map[string]LabelValueFromCtx{},
+ }
}
+func (o *options) emptyDynamicLabels() prometheus.Labels {
+ labels := prometheus.Labels{}
+
+ for label := range o.extraLabelsFromCtx {
+ labels[label] = ""
+ }
+
+ return labels
+}
+
+type optionApplyFunc func(*options)
+
+func (o optionApplyFunc) apply(opt *options) { o(opt) }
+
// WithExtraMethods adds additional HTTP methods to the list of allowed methods.
// See https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods for the default list.
//
// See the example for ExampleInstrumentHandlerWithExtraMethods for example usage.
func WithExtraMethods(methods ...string) Option {
- return func(o *option) {
+ return optionApplyFunc(func(o *options) {
o.extraMethods = methods
- }
+ })
+}
+
+// WithExemplarFromContext allows injecting a function that extracts an exemplar from the
+// request context; the exemplar is attached to counter and histogram metrics.
+// If the function returns nil labels or the metric does not support exemplars, no exemplar
+// will be added (noop), but the metric will continue to observe/increment.
+func WithExemplarFromContext(getExemplarFn func(requestCtx context.Context) prometheus.Labels) Option {
+ return optionApplyFunc(func(o *options) {
+ o.getExemplarFn = getExemplarFn
+ })
+}
+
+// WithLabelFromCtx registers a label for dynamic resolution with access to context.
+// See the example for ExampleInstrumentHandlerWithLabelResolver for example usage.
+func WithLabelFromCtx(name string, valueFn LabelValueFromCtx) Option {
+ return optionApplyFunc(func(o *options) {
+ o.extraLabelsFromCtx[name] = valueFn
+ })
}
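A usage sketch for the option API above, wiring WithLabelFromCtx into InstrumentHandlerCounter; the tenant label, context key, and handler body are illustrative assumptions:

package main

import (
	"context"
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

type tenantKey struct{}

func main() {
	counter := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "http_requests_total",
			Help: "Requests by code, method, and tenant.",
		},
		[]string{"code", "method", "tenant"},
	)
	prometheus.MustRegister(counter)

	handler := promhttp.InstrumentHandlerCounter(
		counter,
		http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusOK)
		}),
		// "tenant" is resolved per request; a middleware (not shown) is
		// assumed to have stored the value under tenantKey{}.
		promhttp.WithLabelFromCtx("tenant", func(ctx context.Context) string {
			if v, ok := ctx.Value(tenantKey{}).(string); ok {
				return v
			}
			return "unknown"
		}),
	)

	http.Handle("/", handler)
	log.Fatal(http.ListenAndServe(":8080", nil))
}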
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
index 383a7f5941a..c6fd2f58b74 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
@@ -15,24 +15,23 @@ package prometheus
import (
"bytes"
+ "errors"
"fmt"
- "io/ioutil"
"os"
"path/filepath"
"runtime"
"sort"
+ "strconv"
"strings"
"sync"
"unicode/utf8"
- "github.com/cespare/xxhash/v2"
- //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
- "github.com/golang/protobuf/proto"
- "github.com/prometheus/common/expfmt"
+ "github.com/prometheus/client_golang/prometheus/internal"
+ "github.com/cespare/xxhash/v2"
dto "github.com/prometheus/client_model/go"
-
- "github.com/prometheus/client_golang/prometheus/internal"
+ "github.com/prometheus/common/expfmt"
+ "google.golang.org/protobuf/proto"
)
const (
@@ -252,9 +251,12 @@ func (errs MultiError) MaybeUnwrap() error {
}
// Registry registers Prometheus collectors, collects their metrics, and gathers
-// them into MetricFamilies for exposition. It implements both Registerer and
-// Gatherer. The zero value is not usable. Create instances with NewRegistry or
-// NewPedanticRegistry.
+// them into MetricFamilies for exposition. It implements Registerer, Gatherer,
+// and Collector. The zero value is not usable. Create instances with
+// NewRegistry or NewPedanticRegistry.
+//
+// Registry implements Collector to allow it to be used for creating groups of
+// metrics. See the Grouping example for how this can be done.
type Registry struct {
mtx sync.RWMutex
collectorsByID map[uint64]Collector // ID is a hash of the descIDs.
@@ -289,7 +291,7 @@ func (r *Registry) Register(c Collector) error {
// Is the descriptor valid at all?
if desc.err != nil {
- return fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err)
+ return fmt.Errorf("descriptor %s is invalid: %w", desc, desc.err)
}
// Is the descID unique?
@@ -312,16 +314,17 @@ func (r *Registry) Register(c Collector) error {
if dimHash != desc.dimHash {
return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc)
}
- } else {
- // ...then check the new descriptors already seen.
- if dimHash, exists := newDimHashesByName[desc.fqName]; exists {
- if dimHash != desc.dimHash {
- return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc)
- }
- } else {
- newDimHashesByName[desc.fqName] = desc.dimHash
+ continue
+ }
+
+ // ...then check the new descriptors already seen.
+ if dimHash, exists := newDimHashesByName[desc.fqName]; exists {
+ if dimHash != desc.dimHash {
+ return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc)
}
+ continue
}
+ newDimHashesByName[desc.fqName] = desc.dimHash
}
// A Collector yielding no Desc at all is considered unchecked.
if len(newDescIDs) == 0 {
@@ -407,6 +410,14 @@ func (r *Registry) MustRegister(cs ...Collector) {
// Gather implements Gatherer.
func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
+ r.mtx.RLock()
+
+ if len(r.collectorsByID) == 0 && len(r.uncheckedCollectors) == 0 {
+ // Fast path.
+ r.mtx.RUnlock()
+ return nil, nil
+ }
+
var (
checkedMetricChan = make(chan Metric, capMetricChan)
uncheckedMetricChan = make(chan Metric, capMetricChan)
@@ -416,7 +427,6 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
registeredDescIDs map[uint64]struct{} // Only used for pedantic checks
)
- r.mtx.RLock()
goroutineBudget := len(r.collectorsByID) + len(r.uncheckedCollectors)
metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName))
checkedCollectors := make(chan Collector, len(r.collectorsByID))
@@ -539,7 +549,7 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
goroutineBudget--
runtime.Gosched()
}
- // Once both checkedMetricChan and uncheckdMetricChan are closed
+ // Once both checkedMetricChan and uncheckedMetricChan are closed
// and drained, the contraption above will nil out cmc and umc,
// and then we can leave the collect loop here.
if cmc == nil && umc == nil {
@@ -549,6 +559,31 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
}
+// Describe implements Collector.
+func (r *Registry) Describe(ch chan<- *Desc) {
+ r.mtx.RLock()
+ defer r.mtx.RUnlock()
+
+ // Only report the checked Collectors; unchecked collectors don't report any
+ // Desc.
+ for _, c := range r.collectorsByID {
+ c.Describe(ch)
+ }
+}
+
+// Collect implements Collector.
+func (r *Registry) Collect(ch chan<- Metric) {
+ r.mtx.RLock()
+ defer r.mtx.RUnlock()
+
+ for _, c := range r.collectorsByID {
+ c.Collect(ch)
+ }
+ for _, c := range r.uncheckedCollectors {
+ c.Collect(ch)
+ }
+}
+
// WriteToTextfile calls Gather on the provided Gatherer, encodes the result in the
// Prometheus text format, and writes it to a temporary file. Upon success, the
// temporary file is renamed to the provided filename.
@@ -556,7 +591,7 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
// This is intended for use with the textfile collector of the node exporter.
// Note that the node exporter expects the filename to be suffixed with ".prom".
func WriteToTextfile(filename string, g Gatherer) error {
- tmp, err := ioutil.TempFile(filepath.Dir(filename), filepath.Base(filename))
+ tmp, err := os.CreateTemp(filepath.Dir(filename), filepath.Base(filename))
if err != nil {
return err
}
@@ -575,7 +610,7 @@ func WriteToTextfile(filename string, g Gatherer) error {
return err
}
- if err := os.Chmod(tmp.Name(), 0644); err != nil {
+ if err := os.Chmod(tmp.Name(), 0o644); err != nil {
return err
}
return os.Rename(tmp.Name(), filename)
@@ -596,7 +631,7 @@ func processMetric(
}
dtoMetric := &dto.Metric{}
if err := metric.Write(dtoMetric); err != nil {
- return fmt.Errorf("error collecting metric %v: %s", desc, err)
+ return fmt.Errorf("error collecting metric %v: %w", desc, err)
}
metricFamily, ok := metricFamiliesByName[desc.fqName]
if ok { // Existing name.
@@ -718,12 +753,13 @@ func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) {
for i, g := range gs {
mfs, err := g.Gather()
if err != nil {
- if multiErr, ok := err.(MultiError); ok {
+ multiErr := MultiError{}
+ if errors.As(err, &multiErr) {
for _, err := range multiErr {
- errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err))
+ errs = append(errs, fmt.Errorf("[from Gatherer #%d] %w", i+1, err))
}
} else {
- errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err))
+ errs = append(errs, fmt.Errorf("[from Gatherer #%d] %w", i+1, err))
}
}
for _, mf := range mfs {
@@ -884,11 +920,11 @@ func checkMetricConsistency(
h.Write(separatorByteSlice)
// Make sure label pairs are sorted. We depend on it for the consistency
// check.
- if !sort.IsSorted(labelPairSorter(dtoMetric.Label)) {
+ if !sort.IsSorted(internal.LabelPairSorter(dtoMetric.Label)) {
// We cannot sort dtoMetric.Label in place as it is immutable by contract.
copiedLabels := make([]*dto.LabelPair, len(dtoMetric.Label))
copy(copiedLabels, dtoMetric.Label)
- sort.Sort(labelPairSorter(copiedLabels))
+ sort.Sort(internal.LabelPairSorter(copiedLabels))
dtoMetric.Label = copiedLabels
}
for _, lp := range dtoMetric.Label {
@@ -897,6 +933,10 @@ func checkMetricConsistency(
h.WriteString(lp.GetValue())
h.Write(separatorByteSlice)
}
+ if dtoMetric.TimestampMs != nil {
+ h.WriteString(strconv.FormatInt(*(dtoMetric.TimestampMs), 10))
+ h.Write(separatorByteSlice)
+ }
hSum := h.Sum64()
if _, exists := metricHashes[hSum]; exists {
return fmt.Errorf(
@@ -924,7 +964,7 @@ func checkDescConsistency(
// Is the desc consistent with the content of the metric?
lpsFromDesc := make([]*dto.LabelPair, len(desc.constLabelPairs), len(dtoMetric.Label))
copy(lpsFromDesc, desc.constLabelPairs)
- for _, l := range desc.variableLabels {
+ for _, l := range desc.variableLabels.names {
lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{
Name: proto.String(l),
})
@@ -935,7 +975,7 @@ func checkDescConsistency(
metricFamily.GetName(), dtoMetric, desc,
)
}
- sort.Sort(labelPairSorter(lpsFromDesc))
+ sort.Sort(internal.LabelPairSorter(lpsFromDesc))
for i, lpFromDesc := range lpsFromDesc {
lpFromMetric := dtoMetric.Label[i]
if lpFromDesc.GetName() != lpFromMetric.GetName() ||
@@ -948,3 +988,89 @@ func checkDescConsistency(
}
return nil
}
+
+var _ TransactionalGatherer = &MultiTRegistry{}
+
+// MultiTRegistry is a TransactionalGatherer that joins gathered metrics from multiple
+// transactional gatherers.
+//
+// It is the caller's responsibility to ensure the joined registries have
+// mutually exclusive metric families; no deduplication will happen.
+type MultiTRegistry struct {
+ tGatherers []TransactionalGatherer
+}
+
+// NewMultiTRegistry creates a MultiTRegistry.
+func NewMultiTRegistry(tGatherers ...TransactionalGatherer) *MultiTRegistry {
+ return &MultiTRegistry{
+ tGatherers: tGatherers,
+ }
+}
+
+// Gather implements the TransactionalGatherer interface.
+func (r *MultiTRegistry) Gather() (mfs []*dto.MetricFamily, done func(), err error) {
+ errs := MultiError{}
+
+ dFns := make([]func(), 0, len(r.tGatherers))
+ // TODO(bwplotka): Implement concurrency for those?
+ for _, g := range r.tGatherers {
+ // TODO(bwplotka): Check for duplicates?
+ m, d, err := g.Gather()
+ errs.Append(err)
+
+ mfs = append(mfs, m...)
+ dFns = append(dFns, d)
+ }
+
+ // TODO(bwplotka): Consider sorting in place, given that each gatherer's metric families are already sorted.
+ sort.Slice(mfs, func(i, j int) bool {
+ return *mfs[i].Name < *mfs[j].Name
+ })
+ return mfs, func() {
+ for _, d := range dFns {
+ d()
+ }
+ }, errs.MaybeUnwrap()
+}
+
+// TransactionalGatherer represents a transactional gatherer that can be notified once the
+// memory used by the returned metric families is no longer needed by the caller. This
+// allows caching implementations.
+type TransactionalGatherer interface {
+ // Gather returns metrics in a lexicographically sorted slice
+ // of uniquely named MetricFamily protobufs. Gather ensures that the
+ // returned slice is valid and self-consistent so that it can be used
+ // for valid exposition. As an exception to the strict consistency
+ // requirements described for metric.Desc, Gather will tolerate
+ // different sets of label names for metrics of the same metric family.
+ //
+ // Even if an error occurs, Gather attempts to gather as many metrics as
+ // possible. Hence, if a non-nil error is returned, the returned
+ // MetricFamily slice could be nil (in case of a fatal error that
+ // prevented any meaningful metric collection) or contain a number of
+ // MetricFamily protobufs, some of which might be incomplete, and some
+ // might be missing altogether. The returned error (which might be a
+ // MultiError) explains the details. Note that this is mostly useful for
+ // debugging purposes. If the gathered protobufs are to be used for
+ // exposition in actual monitoring, it is almost always better to not
+ // expose an incomplete result and instead disregard the returned
+ // MetricFamily protobufs in case the returned error is non-nil.
+ //
+ // Important: done is expected to be called (even if an error occurs!)
+ // once the caller no longer needs the returned slice of dto.MetricFamily.
+ Gather() (_ []*dto.MetricFamily, done func(), err error)
+}
+
+// ToTransactionalGatherer transforms a Gatherer into a TransactionalGatherer whose done function is a no-op.
+func ToTransactionalGatherer(g Gatherer) TransactionalGatherer {
+ return &noTransactionGatherer{g: g}
+}
+
+type noTransactionGatherer struct {
+ g Gatherer
+}
+
+// Gather implements the TransactionalGatherer interface.
+func (g *noTransactionGatherer) Gather() (_ []*dto.MetricFamily, done func(), err error) {
+ mfs, err := g.g.Gather()
+ return mfs, func() {}, err
+}
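Putting the transactional pieces together, a minimal sketch of joining two ordinary registries (hypothetical reg1 and reg2, which must expose disjoint metric families) through MultiTRegistry:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	reg1 := prometheus.NewRegistry()
	reg2 := prometheus.NewRegistry()

	// Plain Gatherers are wrapped with a no-op done function before joining.
	multi := prometheus.NewMultiTRegistry(
		prometheus.ToTransactionalGatherer(reg1),
		prometheus.ToTransactionalGatherer(reg2),
	)

	mfs, done, err := multi.Gather()
	defer done() // Call done even on error, once mfs is no longer needed.
	if err != nil {
		fmt.Println("gather error:", err)
	}
	fmt.Println("gathered", len(mfs), "metric families")
}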
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
index c5fa8ed7c71..1ab0e479655 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
@@ -22,11 +22,11 @@ import (
"sync/atomic"
"time"
- "github.com/beorn7/perks/quantile"
- //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
- "github.com/golang/protobuf/proto"
-
dto "github.com/prometheus/client_model/go"
+
+ "github.com/beorn7/perks/quantile"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/types/known/timestamppb"
)
// quantileLabel is used for the label that defines the quantile in a
@@ -146,6 +146,21 @@ type SummaryOpts struct {
// is the internal buffer size of the underlying package
// "github.com/bmizerany/perks/quantile").
BufCap uint32
+
+ // now is for testing purposes; by default it is time.Now.
+ now func() time.Time
+}
+
+// SummaryVecOpts bundles the options to create a SummaryVec metric.
+// It is mandatory to set SummaryOpts; see there for mandatory fields. VariableLabels
+// is optional and can safely be left to its default value.
+type SummaryVecOpts struct {
+ SummaryOpts
+
+ // VariableLabels are used to partition the metric vector by the given set
+ // of labels. Each label value will be constrained with the optional Constraint
+ // function, if provided.
+ VariableLabels ConstrainableLabels
}
// Problem with the sliding-window decay algorithm... The Merge method of
@@ -177,11 +192,11 @@ func NewSummary(opts SummaryOpts) Summary {
}
func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
- if len(desc.variableLabels) != len(labelValues) {
- panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues))
+ if len(desc.variableLabels.names) != len(labelValues) {
+ panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, labelValues))
}
- for _, n := range desc.variableLabels {
+ for _, n := range desc.variableLabels.names {
if n == quantileLabel {
panic(errQuantileLabelNotAllowed)
}
@@ -211,6 +226,9 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
opts.BufCap = DefBufCap
}
+ if opts.now == nil {
+ opts.now = time.Now
+ }
if len(opts.Objectives) == 0 {
// Use the lock-free implementation of a Summary without objectives.
s := &noObjectivesSummary{
@@ -219,6 +237,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
counts: [2]*summaryCounts{{}, {}},
}
s.init(s) // Init self-collection.
+ s.createdTs = timestamppb.New(opts.now())
return s
}
@@ -234,7 +253,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
coldBuf: make([]float64, 0, opts.BufCap),
streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets),
}
- s.headStreamExpTime = time.Now().Add(s.streamDuration)
+ s.headStreamExpTime = opts.now().Add(s.streamDuration)
s.hotBufExpTime = s.headStreamExpTime
for i := uint32(0); i < opts.AgeBuckets; i++ {
@@ -248,6 +267,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
sort.Float64s(s.sortedObjectives)
s.init(s) // Init self-collection.
+ s.createdTs = timestamppb.New(opts.now())
return s
}
@@ -275,6 +295,8 @@ type summary struct {
headStream *quantile.Stream
headStreamIdx int
headStreamExpTime, hotBufExpTime time.Time
+
+ createdTs *timestamppb.Timestamp
}
func (s *summary) Desc() *Desc {
@@ -296,7 +318,9 @@ func (s *summary) Observe(v float64) {
}
func (s *summary) Write(out *dto.Metric) error {
- sum := &dto.Summary{}
+ sum := &dto.Summary{
+ CreatedTimestamp: s.createdTs,
+ }
qs := make([]*dto.Quantile, 0, len(s.objectives))
s.bufMtx.Lock()
@@ -429,6 +453,8 @@ type noObjectivesSummary struct {
counts [2]*summaryCounts
labelPairs []*dto.LabelPair
+
+ createdTs *timestamppb.Timestamp
}
func (s *noObjectivesSummary) Desc() *Desc {
@@ -479,8 +505,9 @@ func (s *noObjectivesSummary) Write(out *dto.Metric) error {
}
sum := &dto.Summary{
- SampleCount: proto.Uint64(count),
- SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
+ SampleCount: proto.Uint64(count),
+ SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
+ CreatedTimestamp: s.createdTs,
}
out.Summary = sum
@@ -530,20 +557,28 @@ type SummaryVec struct {
// it is handled by the Prometheus server internally, “quantile” is an illegal
// label name. NewSummaryVec will panic if this label name is used.
func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
- for _, ln := range labelNames {
+ return V2.NewSummaryVec(SummaryVecOpts{
+ SummaryOpts: opts,
+ VariableLabels: UnconstrainedLabels(labelNames),
+ })
+}
+
+// NewSummaryVec creates a new SummaryVec based on the provided SummaryVecOpts.
+func (v2) NewSummaryVec(opts SummaryVecOpts) *SummaryVec {
+ for _, ln := range opts.VariableLabels.labelNames() {
if ln == quantileLabel {
panic(errQuantileLabelNotAllowed)
}
}
- desc := NewDesc(
+ desc := V2.NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
- labelNames,
+ opts.VariableLabels,
opts.ConstLabels,
)
return &SummaryVec{
MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
- return newSummary(desc, opts, lvs...)
+ return newSummary(desc, opts.SummaryOpts, lvs...)
}),
}
}
@@ -603,7 +638,8 @@ func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) {
// WithLabelValues works as GetMetricWithLabelValues, but panics where
// GetMetricWithLabelValues would have returned an error. Not returning an
// error allows shortcuts like
-// myVec.WithLabelValues("404", "GET").Observe(42.21)
+//
+// myVec.WithLabelValues("404", "GET").Observe(42.21)
func (v *SummaryVec) WithLabelValues(lvs ...string) Observer {
s, err := v.GetMetricWithLabelValues(lvs...)
if err != nil {
@@ -614,7 +650,8 @@ func (v *SummaryVec) WithLabelValues(lvs ...string) Observer {
// With works as GetMetricWith, but panics where GetMetricWithLabels would have
// returned an error. Not returning an error allows shortcuts like
-// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
+//
+// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
func (v *SummaryVec) With(labels Labels) Observer {
s, err := v.GetMetricWith(labels)
if err != nil {
@@ -660,6 +697,7 @@ type constSummary struct {
sum float64
quantiles map[float64]float64
labelPairs []*dto.LabelPair
+ createdTs *timestamppb.Timestamp
}
func (s *constSummary) Desc() *Desc {
@@ -667,7 +705,9 @@ func (s *constSummary) Desc() *Desc {
}
func (s *constSummary) Write(out *dto.Metric) error {
- sum := &dto.Summary{}
+ sum := &dto.Summary{
+ CreatedTimestamp: s.createdTs,
+ }
qs := make([]*dto.Quantile, 0, len(s.quantiles))
sum.SampleCount = proto.Uint64(s.count)
@@ -701,7 +741,8 @@ func (s *constSummary) Write(out *dto.Metric) error {
//
// quantiles maps ranks to quantile values. For example, a median latency of
// 0.23s and a 99th percentile latency of 0.56s would be expressed as:
-// map[float64]float64{0.5: 0.23, 0.99: 0.56}
+//
+// map[float64]float64{0.5: 0.23, 0.99: 0.56}
//
// NewConstSummary returns an error if the length of labelValues is not
// consistent with the variable labels in Desc or if Desc is invalid.
@@ -715,7 +756,7 @@ func NewConstSummary(
if desc.err != nil {
return nil, desc.err
}
- if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
+ if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
return nil, err
}
return &constSummary{
@@ -742,3 +783,45 @@ func MustNewConstSummary(
}
return m
}
+
+// NewConstSummaryWithCreatedTimestamp does the same thing as NewConstSummary but sets the created timestamp.
+func NewConstSummaryWithCreatedTimestamp(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ quantiles map[float64]float64,
+ ct time.Time,
+ labelValues ...string,
+) (Metric, error) {
+ if desc.err != nil {
+ return nil, desc.err
+ }
+ if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
+ return nil, err
+ }
+ return &constSummary{
+ desc: desc,
+ count: count,
+ sum: sum,
+ quantiles: quantiles,
+ labelPairs: MakeLabelPairs(desc, labelValues),
+ createdTs: timestamppb.New(ct),
+ }, nil
+}
+
+// MustNewConstSummaryWithCreatedTimestamp is a version of NewConstSummaryWithCreatedTimestamp that panics where
+// NewConstSummaryWithCreatedTimestamp would have returned an error.
+func MustNewConstSummaryWithCreatedTimestamp(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ quantiles map[float64]float64,
+ ct time.Time,
+ labelValues ...string,
+) Metric {
+ m, err := NewConstSummaryWithCreatedTimestamp(desc, count, sum, quantiles, ct, labelValues...)
+ if err != nil {
+ panic(err)
+ }
+ return m
+}
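For orientation, a sketch of the new constructor as it might be used from a custom Collector's Collect method; the descriptor and all values are illustrative:

package example

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

var summaryDesc = prometheus.NewDesc(
	"demo_request_duration_seconds", // hypothetical metric
	"Duration summary imported from another system.",
	nil, nil,
)

func collectSummary(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstSummaryWithCreatedTimestamp(
		summaryDesc,
		4711,   // sample count
		403.34, // sample sum
		map[float64]float64{0.5: 0.23, 0.99: 0.56}, // rank -> quantile value
		time.Unix(1700000000, 0),                   // created timestamp
	)
}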
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/timer.go b/vendor/github.com/prometheus/client_golang/prometheus/timer.go
index 8d5f1052337..52344fef53f 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/timer.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/timer.go
@@ -23,13 +23,24 @@ type Timer struct {
}
// NewTimer creates a new Timer. The provided Observer is used to observe a
-// duration in seconds. Timer is usually used to time a function call in the
+// duration in seconds. If the Observer implements ExemplarObserver, passing an
+// exemplar later on will also be supported.
+// Timer is usually used to time a function call in the
// following way:
-// func TimeMe() {
-// timer := NewTimer(myHistogram)
-// defer timer.ObserveDuration()
-// // Do actual work.
-// }
+//
+// func TimeMe() {
+// timer := NewTimer(myHistogram)
+// defer timer.ObserveDuration()
+// // Do actual work.
+// }
+//
+// or
+//
+// func TimeMeWithExemplar() {
+// timer := NewTimer(myHistogram)
+// defer timer.ObserveDurationWithExemplar(exemplar)
+// // Do actual work.
+// }
func NewTimer(o Observer) *Timer {
return &Timer{
begin: time.Now(),
@@ -52,3 +63,19 @@ func (t *Timer) ObserveDuration() time.Duration {
}
return d
}
+
+// ObserveDurationWithExemplar is like ObserveDuration, but it will also
+// observe the exemplar with the duration, unless the exemplar is nil or the
+// provided Observer cannot be cast to an ExemplarObserver.
+func (t *Timer) ObserveDurationWithExemplar(exemplar Labels) time.Duration {
+ d := time.Since(t.begin)
+ eo, ok := t.observer.(ExemplarObserver)
+ if ok && exemplar != nil {
+ eo.ObserveWithExemplar(d.Seconds(), exemplar)
+ return d
+ }
+ if t.observer != nil {
+ t.observer.Observe(d.Seconds())
+ }
+ return d
+}
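A sketch of the exemplar-aware timer, assuming a histogram (which implements ExemplarObserver) and a hypothetical trace_id label:

package example

import "github.com/prometheus/client_golang/prometheus"

var opLatency = prometheus.NewHistogram(prometheus.HistogramOpts{
	Name: "demo_op_duration_seconds", // hypothetical metric
	Help: "Duration of the demo operation.",
})

func timedOp(traceID string) {
	timer := prometheus.NewTimer(opLatency)
	// Histograms implement ExemplarObserver, so the duration is observed
	// together with the exemplar; with a plain Observer the call falls
	// back to Observe and the exemplar is dropped.
	defer timer.ObserveDurationWithExemplar(prometheus.Labels{"trace_id": traceID})
	// ... do actual work ...
}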
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go
index c778711b8ab..cc23011fad2 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/value.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go
@@ -14,16 +14,17 @@
package prometheus
import (
+ "errors"
"fmt"
"sort"
"time"
"unicode/utf8"
- //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
- "github.com/golang/protobuf/proto"
- "github.com/golang/protobuf/ptypes"
+ "github.com/prometheus/client_golang/prometheus/internal"
dto "github.com/prometheus/client_model/go"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/types/known/timestamppb"
)
// ValueType is an enumeration of metric types that represent a simple value.
@@ -38,6 +39,23 @@ const (
UntypedValue
)
+var (
+ CounterMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_COUNTER; return &d }()
+ GaugeMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_GAUGE; return &d }()
+ UntypedMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_UNTYPED; return &d }()
+)
+
+func (v ValueType) ToDTO() *dto.MetricType {
+ switch v {
+ case CounterValue:
+ return CounterMetricTypePtr
+ case GaugeValue:
+ return GaugeMetricTypePtr
+ default:
+ return UntypedMetricTypePtr
+ }
+}
+
// valueFunc is a generic metric for simple values retrieved on collect time
// from a function. It implements Metric and Collector. Its effective type is
// determined by ValueType. This is a low-level building block used by the
@@ -74,7 +92,7 @@ func (v *valueFunc) Desc() *Desc {
}
func (v *valueFunc) Write(out *dto.Metric) error {
- return populateMetric(v.valType, v.function(), v.labelPairs, nil, out)
+ return populateMetric(v.valType, v.function(), v.labelPairs, nil, out, nil)
}
// NewConstMetric returns a metric with one fixed value that cannot be
@@ -88,14 +106,18 @@ func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues
if desc.err != nil {
return nil, desc.err
}
- if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
+ if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
+ return nil, err
+ }
+
+ metric := &dto.Metric{}
+ if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric, nil); err != nil {
return nil, err
}
+
return &constMetric{
- desc: desc,
- valType: valueType,
- val: value,
- labelPairs: MakeLabelPairs(desc, labelValues),
+ desc: desc,
+ metric: metric,
}, nil
}
@@ -109,11 +131,46 @@ func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelVal
return m
}
+// NewConstMetricWithCreatedTimestamp does the same thing as NewConstMetric, but generates Counters
+// with the created timestamp set and returns an error for other metric types.
+func NewConstMetricWithCreatedTimestamp(desc *Desc, valueType ValueType, value float64, ct time.Time, labelValues ...string) (Metric, error) {
+ if desc.err != nil {
+ return nil, desc.err
+ }
+ if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
+ return nil, err
+ }
+ switch valueType {
+ case CounterValue:
+ break
+ default:
+ return nil, errors.New("created timestamps are only supported for counters")
+ }
+
+ metric := &dto.Metric{}
+ if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric, timestamppb.New(ct)); err != nil {
+ return nil, err
+ }
+
+ return &constMetric{
+ desc: desc,
+ metric: metric,
+ }, nil
+}
+
+// MustNewConstMetricWithCreatedTimestamp is a version of NewConstMetricWithCreatedTimestamp that panics where
+// NewConstMetricWithCreatedTimestamp would have returned an error.
+func MustNewConstMetricWithCreatedTimestamp(desc *Desc, valueType ValueType, value float64, ct time.Time, labelValues ...string) Metric {
+ m, err := NewConstMetricWithCreatedTimestamp(desc, valueType, value, ct, labelValues...)
+ if err != nil {
+ panic(err)
+ }
+ return m
+}
+
type constMetric struct {
- desc *Desc
- valType ValueType
- val float64
- labelPairs []*dto.LabelPair
+ desc *Desc
+ metric *dto.Metric
}
func (m *constMetric) Desc() *Desc {
@@ -121,7 +178,11 @@ func (m *constMetric) Desc() *Desc {
}
func (m *constMetric) Write(out *dto.Metric) error {
- return populateMetric(m.valType, m.val, m.labelPairs, nil, out)
+ out.Label = m.metric.Label
+ out.Counter = m.metric.Counter
+ out.Gauge = m.metric.Gauge
+ out.Untyped = m.metric.Untyped
+ return nil
}
func populateMetric(
@@ -130,11 +191,12 @@ func populateMetric(
labelPairs []*dto.LabelPair,
e *dto.Exemplar,
m *dto.Metric,
+ ct *timestamppb.Timestamp,
) error {
m.Label = labelPairs
switch t {
case CounterValue:
- m.Counter = &dto.Counter{Value: proto.Float64(v), Exemplar: e}
+ m.Counter = &dto.Counter{Value: proto.Float64(v), Exemplar: e, CreatedTimestamp: ct}
case GaugeValue:
m.Gauge = &dto.Gauge{Value: proto.Float64(v)}
case UntypedValue:
@@ -153,29 +215,29 @@ func populateMetric(
// This function is only needed for custom Metric implementations. See MetricVec
// example.
func MakeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
- totalLen := len(desc.variableLabels) + len(desc.constLabelPairs)
+ totalLen := len(desc.variableLabels.names) + len(desc.constLabelPairs)
if totalLen == 0 {
// Super fast path.
return nil
}
- if len(desc.variableLabels) == 0 {
+ if len(desc.variableLabels.names) == 0 {
// Moderately fast path.
return desc.constLabelPairs
}
labelPairs := make([]*dto.LabelPair, 0, totalLen)
- for i, n := range desc.variableLabels {
+ for i, l := range desc.variableLabels.names {
labelPairs = append(labelPairs, &dto.LabelPair{
- Name: proto.String(n),
+ Name: proto.String(l),
Value: proto.String(labelValues[i]),
})
}
labelPairs = append(labelPairs, desc.constLabelPairs...)
- sort.Sort(labelPairSorter(labelPairs))
+ sort.Sort(internal.LabelPairSorter(labelPairs))
return labelPairs
}
// ExemplarMaxRunes is the max total number of runes allowed in exemplar labels.
-const ExemplarMaxRunes = 64
+const ExemplarMaxRunes = 128
// newExemplar creates a new dto.Exemplar from the provided values. An error is
// returned if any of the label names or values are invalid or if the total
@@ -183,8 +245,8 @@ const ExemplarMaxRunes = 64
func newExemplar(value float64, ts time.Time, l Labels) (*dto.Exemplar, error) {
e := &dto.Exemplar{}
e.Value = proto.Float64(value)
- tsProto, err := ptypes.TimestampProto(ts)
- if err != nil {
+ tsProto := timestamppb.New(ts)
+ if err := tsProto.CheckValid(); err != nil {
return nil, err
}
e.Timestamp = tsProto
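Analogous to the summary case, a sketch of the counter-only constructor; descriptor and values are illustrative:

package example

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

var opsDesc = prometheus.NewDesc(
	"demo_ops_total", // hypothetical metric
	"Operations imported from an external system.",
	nil, nil,
)

func collectOps(ch chan<- prometheus.Metric) {
	// Only CounterValue is accepted; any other ValueType makes the
	// constructor return an error (and this Must variant panic).
	ch <- prometheus.MustNewConstMetricWithCreatedTimestamp(
		opsDesc, prometheus.CounterValue, 1027, time.Unix(1700000000, 0),
	)
}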
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
index 4ababe6c981..2c808eece0a 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
@@ -72,6 +72,8 @@ func NewMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec {
// with a performance overhead (for creating and processing the Labels map).
// See also the CounterVec example.
func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
+ lvs = constrainLabelValues(m.desc, lvs, m.curry)
+
h, err := m.hashLabelValues(lvs)
if err != nil {
return false
@@ -91,6 +93,9 @@ func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
// This method is used for the same purpose as DeleteLabelValues(...string). See
// there for pros and cons of the two methods.
func (m *MetricVec) Delete(labels Labels) bool {
+ labels, closer := constrainLabels(m.desc, labels)
+ defer closer()
+
h, err := m.hashLabels(labels)
if err != nil {
return false
@@ -99,6 +104,19 @@ func (m *MetricVec) Delete(labels Labels) bool {
return m.metricMap.deleteByHashWithLabels(h, labels, m.curry)
}
+// DeletePartialMatch deletes all metrics where the variable labels contain all of those
+// passed in as labels. The order of the labels does not matter.
+// It returns the number of metrics deleted.
+//
+// Note that curried labels will never be matched if deleting from the curried vector.
+// To match curried labels with DeletePartialMatch, it must be called on the base vector.
+func (m *MetricVec) DeletePartialMatch(labels Labels) int {
+ labels, closer := constrainLabels(m.desc, labels)
+ defer closer()
+
+ return m.metricMap.deleteByLabels(labels, m.curry)
+}
+
// Without explicit forwarding of Describe, Collect, Reset, those methods won't
// show up in GoDoc.
@@ -134,11 +152,11 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) {
oldCurry = m.curry
iCurry int
)
- for i, label := range m.desc.variableLabels {
- val, ok := labels[label]
+ for i, labelName := range m.desc.variableLabels.names {
+ val, ok := labels[labelName]
if iCurry < len(oldCurry) && oldCurry[iCurry].index == i {
if ok {
- return nil, fmt.Errorf("label name %q is already curried", label)
+ return nil, fmt.Errorf("label name %q is already curried", labelName)
}
newCurry = append(newCurry, oldCurry[iCurry])
iCurry++
@@ -146,7 +164,10 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) {
if !ok {
continue // Label stays uncurried.
}
- newCurry = append(newCurry, curriedLabelValue{i, val})
+ newCurry = append(newCurry, curriedLabelValue{
+ i,
+ m.desc.variableLabels.constrain(labelName, val),
+ })
}
}
if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 {
@@ -189,6 +210,7 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) {
// a wrapper around MetricVec, implementing a vector for a specific Metric
// implementation, for example GaugeVec.
func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
+ lvs = constrainLabelValues(m.desc, lvs, m.curry)
h, err := m.hashLabelValues(lvs)
if err != nil {
return nil, err
@@ -214,6 +236,9 @@ func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
// around MetricVec, implementing a vector for a specific Metric implementation,
// for example GaugeVec.
func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
+ labels, closer := constrainLabels(m.desc, labels)
+ defer closer()
+
h, err := m.hashLabels(labels)
if err != nil {
return nil, err
@@ -223,7 +248,7 @@ func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
}
func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
- if err := validateLabelValues(vals, len(m.desc.variableLabels)-len(m.curry)); err != nil {
+ if err := validateLabelValues(vals, len(m.desc.variableLabels.names)-len(m.curry)); err != nil {
return 0, err
}
@@ -232,7 +257,7 @@ func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
curry = m.curry
iVals, iCurry int
)
- for i := 0; i < len(m.desc.variableLabels); i++ {
+ for i := 0; i < len(m.desc.variableLabels.names); i++ {
if iCurry < len(curry) && curry[iCurry].index == i {
h = m.hashAdd(h, curry[iCurry].value)
iCurry++
@@ -246,7 +271,7 @@ func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
}
func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
- if err := validateValuesInLabels(labels, len(m.desc.variableLabels)-len(m.curry)); err != nil {
+ if err := validateValuesInLabels(labels, len(m.desc.variableLabels.names)-len(m.curry)); err != nil {
return 0, err
}
@@ -255,17 +280,17 @@ func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
curry = m.curry
iCurry int
)
- for i, label := range m.desc.variableLabels {
- val, ok := labels[label]
+ for i, labelName := range m.desc.variableLabels.names {
+ val, ok := labels[labelName]
if iCurry < len(curry) && curry[iCurry].index == i {
if ok {
- return 0, fmt.Errorf("label name %q is already curried", label)
+ return 0, fmt.Errorf("label name %q is already curried", labelName)
}
h = m.hashAdd(h, curry[iCurry].value)
iCurry++
} else {
if !ok {
- return 0, fmt.Errorf("label name %q missing in label map", label)
+ return 0, fmt.Errorf("label name %q missing in label map", labelName)
}
h = m.hashAdd(h, val)
}
@@ -381,6 +406,82 @@ func (m *metricMap) deleteByHashWithLabels(
return true
}
+// deleteByLabels deletes all metrics that contain the given labels and returns the number deleted.
+func (m *metricMap) deleteByLabels(labels Labels, curry []curriedLabelValue) int {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ var numDeleted int
+
+ for h, metrics := range m.metrics {
+ i := findMetricWithPartialLabels(m.desc, metrics, labels, curry)
+ if i >= len(metrics) {
+ // Didn't find matching labels in this metric slice.
+ continue
+ }
+ delete(m.metrics, h)
+ numDeleted++
+ }
+
+ return numDeleted
+}
+
+// findMetricWithPartialLabels returns the index of the matching metric or
+// len(metrics) if not found.
+func findMetricWithPartialLabels(
+ desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue,
+) int {
+ for i, metric := range metrics {
+ if matchPartialLabels(desc, metric.values, labels, curry) {
+ return i
+ }
+ }
+ return len(metrics)
+}
+
+// indexOf searches the given slice of strings for the target string and returns
+// the index or len(items), along with a boolean indicating whether the search succeeded.
+func indexOf(target string, items []string) (int, bool) {
+ for i, l := range items {
+ if l == target {
+ return i, true
+ }
+ }
+ return len(items), false
+}
+
+// valueMatchesVariableOrCurriedValue determines if a value was previously curried,
+// and returns whether it matches either the "base" value or the curried value accordingly.
+// It also indicates whether the match is against a curried or uncurried value.
+func valueMatchesVariableOrCurriedValue(targetValue string, index int, values []string, curry []curriedLabelValue) (bool, bool) {
+ for _, curriedValue := range curry {
+ if curriedValue.index == index {
+ // This label was curried. See if the curried value matches our target.
+ return curriedValue.value == targetValue, true
+ }
+ }
+ // This label was not curried. See if the current value matches our target label.
+ return values[index] == targetValue, false
+}
+
+// matchPartialLabels searches the current metric and returns whether all of the target label:value pairs are present.
+func matchPartialLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool {
+ for l, v := range labels {
+ // Check if the target label exists in our metrics and get the index.
+ varLabelIndex, validLabel := indexOf(l, desc.variableLabels.names)
+ if validLabel {
+ // Check the value of that label against the target value.
+ // We don't consider curried values in partial matches.
+ matches, curried := valueMatchesVariableOrCurriedValue(v, varLabelIndex, values, curry)
+ if matches && !curried {
+ continue
+ }
+ }
+ return false
+ }
+ return true
+}
+
// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value
// or creates it and returns the new one.
//
@@ -406,7 +507,7 @@ func (m *metricMap) getOrCreateMetricWithLabelValues(
return metric
}
-// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value
+// getOrCreateMetricWithLabels retrieves the metric by hash and label value
// or creates it and returns the new one.
//
// This function holds the mutex.
@@ -485,7 +586,7 @@ func findMetricWithLabels(
return len(metrics)
}
-func matchLabelValues(values []string, lvs []string, curry []curriedLabelValue) bool {
+func matchLabelValues(values, lvs []string, curry []curriedLabelValue) bool {
if len(values) != len(lvs)+len(curry) {
return false
}
@@ -511,7 +612,7 @@ func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabe
return false
}
iCurry := 0
- for i, k := range desc.variableLabels {
+ for i, k := range desc.variableLabels.names {
if iCurry < len(curry) && curry[iCurry].index == i {
if values[i] != curry[iCurry].value {
return false
@@ -529,7 +630,7 @@ func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabe
func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []string {
labelValues := make([]string, len(labels)+len(curry))
iCurry := 0
- for i, k := range desc.variableLabels {
+ for i, k := range desc.variableLabels.names {
if iCurry < len(curry) && curry[iCurry].index == i {
labelValues[i] = curry[iCurry].value
iCurry++
@@ -554,3 +655,55 @@ func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string {
}
return labelValues
}
+
+var labelsPool = &sync.Pool{
+ New: func() interface{} {
+ return make(Labels)
+ },
+}
+
+func constrainLabels(desc *Desc, labels Labels) (Labels, func()) {
+ if len(desc.variableLabels.labelConstraints) == 0 {
+ // Fast path when there are no constraints.
+ return labels, func() {}
+ }
+
+ constrainedLabels := labelsPool.Get().(Labels)
+ for l, v := range labels {
+ constrainedLabels[l] = desc.variableLabels.constrain(l, v)
+ }
+
+ return constrainedLabels, func() {
+ for k := range constrainedLabels {
+ delete(constrainedLabels, k)
+ }
+ labelsPool.Put(constrainedLabels)
+ }
+}
+
+func constrainLabelValues(desc *Desc, lvs []string, curry []curriedLabelValue) []string {
+ if len(desc.variableLabels.labelConstraints) == 0 {
+ // Fast path when there are no constraints.
+ return lvs
+ }
+
+ constrainedValues := make([]string, len(lvs))
+ var iCurry, iLVs int
+ for i := 0; i < len(lvs)+len(curry); i++ {
+ if iCurry < len(curry) && curry[iCurry].index == i {
+ iCurry++
+ continue
+ }
+
+ if i < len(desc.variableLabels.names) {
+ constrainedValues[iLVs] = desc.variableLabels.constrain(
+ desc.variableLabels.names[i],
+ lvs[iLVs],
+ )
+ } else {
+ constrainedValues[iLVs] = lvs[iLVs]
+ }
+ iLVs++
+ }
+ return constrainedValues
+}
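A sketch of partial deletion on a CounterVec (which embeds MetricVec); the metric and labels are illustrative:

package example

import "github.com/prometheus/client_golang/prometheus"

var httpReqs = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Name: "demo_http_requests_total", // hypothetical metric
		Help: "HTTP requests by code and method.",
	},
	[]string{"code", "method"},
)

func pruneServerErrors() int {
	httpReqs.WithLabelValues("500", "GET").Inc()
	httpReqs.WithLabelValues("500", "POST").Inc()
	// Every child whose variable labels contain code="500" is deleted,
	// regardless of method; the return value is the number removed (2 here).
	return httpReqs.DeletePartialMatch(prometheus.Labels{"code": "500"})
}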
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vnext.go b/vendor/github.com/prometheus/client_golang/prometheus/vnext.go
new file mode 100644
index 00000000000..42bc3a8f066
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/vnext.go
@@ -0,0 +1,23 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+type v2 struct{}
+
+// V2 is a struct that can be referenced to access the experimental API that might
+// be present in v2 of client_golang someday. It offers extended functionality
+// of v1 with a slightly changed API. It is acceptable to mix pieces from v1
+// (e.g. `prometheus.NewGauge`) and pieces from v2 (e.g. `prometheus.V2.NewDesc`)
+// in the same codebase.
+var V2 = v2{}
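A sketch of the V2 entry point combined with label constraints. It assumes the ConstrainedLabels implementation of ConstrainableLabels (not shown in this patch); names are illustrative:

package example

import (
	"strings"

	"github.com/prometheus/client_golang/prometheus"
)

// Every "method" value is normalized by the constraint before use, so
// "get" and "GET" address the same child series.
var rpcDur = prometheus.V2.NewSummaryVec(prometheus.SummaryVecOpts{
	SummaryOpts: prometheus.SummaryOpts{
		Name: "demo_rpc_duration_seconds", // hypothetical metric
		Help: "RPC durations by method.",
	},
	VariableLabels: prometheus.ConstrainedLabels{
		{Name: "method", Constraint: strings.ToUpper},
	},
})

func observe() {
	rpcDur.WithLabelValues("get").Observe(0.21)
	rpcDur.WithLabelValues("GET").Observe(0.23)
}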
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
index 74ee93280fe..25da157f152 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
@@ -17,10 +17,10 @@ import (
"fmt"
"sort"
- //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
- "github.com/golang/protobuf/proto"
+ "github.com/prometheus/client_golang/prometheus/internal"
dto "github.com/prometheus/client_model/go"
+ "google.golang.org/protobuf/proto"
)
// WrapRegistererWith returns a Registerer wrapping the provided
@@ -182,7 +182,7 @@ func (m *wrappingMetric) Write(out *dto.Metric) error {
Value: proto.String(lv),
})
}
- sort.Sort(labelPairSorter(out.Label))
+ sort.Sort(internal.LabelPairSorter(out.Label))
return nil
}
@@ -204,7 +204,7 @@ func wrapDesc(desc *Desc, prefix string, labels Labels) *Desc {
constLabels[ln] = lv
}
// NewDesc will do remaining validations.
- newDesc := NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels)
+ newDesc := V2.NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels)
// Propagate errors if there were any. This will override any error
// created by NewDesc above, i.e. earlier errors get precedence.
if desc.err != nil {
diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go
index 35904ea1986..2f15490758a 100644
--- a/vendor/github.com/prometheus/client_model/go/metrics.pb.go
+++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go
@@ -1,25 +1,38 @@
+// Copyright 2013 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.30.0
+// protoc v3.20.3
// source: io/prometheus/client/metrics.proto
package io_prometheus_client
import (
- fmt "fmt"
- proto "github.com/golang/protobuf/proto"
- timestamp "github.com/golang/protobuf/ptypes/timestamp"
- math "math"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+ reflect "reflect"
+ sync "sync"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
type MetricType int32
@@ -38,23 +51,25 @@ const (
MetricType_GAUGE_HISTOGRAM MetricType = 5
)
-var MetricType_name = map[int32]string{
- 0: "COUNTER",
- 1: "GAUGE",
- 2: "SUMMARY",
- 3: "UNTYPED",
- 4: "HISTOGRAM",
- 5: "GAUGE_HISTOGRAM",
-}
-
-var MetricType_value = map[string]int32{
- "COUNTER": 0,
- "GAUGE": 1,
- "SUMMARY": 2,
- "UNTYPED": 3,
- "HISTOGRAM": 4,
- "GAUGE_HISTOGRAM": 5,
-}
+// Enum value maps for MetricType.
+var (
+ MetricType_name = map[int32]string{
+ 0: "COUNTER",
+ 1: "GAUGE",
+ 2: "SUMMARY",
+ 3: "UNTYPED",
+ 4: "HISTOGRAM",
+ 5: "GAUGE_HISTOGRAM",
+ }
+ MetricType_value = map[string]int32{
+ "COUNTER": 0,
+ "GAUGE": 1,
+ "SUMMARY": 2,
+ "UNTYPED": 3,
+ "HISTOGRAM": 4,
+ "GAUGE_HISTOGRAM": 5,
+ }
+)
func (x MetricType) Enum() *MetricType {
p := new(MetricType)
@@ -63,449 +78,555 @@ func (x MetricType) Enum() *MetricType {
}
func (x MetricType) String() string {
- return proto.EnumName(MetricType_name, int32(x))
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (MetricType) Descriptor() protoreflect.EnumDescriptor {
+ return file_io_prometheus_client_metrics_proto_enumTypes[0].Descriptor()
+}
+
+func (MetricType) Type() protoreflect.EnumType {
+ return &file_io_prometheus_client_metrics_proto_enumTypes[0]
}
-func (x *MetricType) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType")
+func (x MetricType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *MetricType) UnmarshalJSON(b []byte) error {
+ num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
if err != nil {
return err
}
- *x = MetricType(value)
+ *x = MetricType(num)
return nil
}
+// Deprecated: Use MetricType.Descriptor instead.
func (MetricType) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_d1e5ddb18987a258, []int{0}
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{0}
}
type LabelPair struct {
- Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
- Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *LabelPair) Reset() { *m = LabelPair{} }
-func (m *LabelPair) String() string { return proto.CompactTextString(m) }
-func (*LabelPair) ProtoMessage() {}
-func (*LabelPair) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1e5ddb18987a258, []int{0}
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
}
-func (m *LabelPair) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_LabelPair.Unmarshal(m, b)
-}
-func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic)
-}
-func (m *LabelPair) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LabelPair.Merge(m, src)
+func (x *LabelPair) Reset() {
+ *x = LabelPair{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *LabelPair) XXX_Size() int {
- return xxx_messageInfo_LabelPair.Size(m)
+
+func (x *LabelPair) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *LabelPair) XXX_DiscardUnknown() {
- xxx_messageInfo_LabelPair.DiscardUnknown(m)
+
+func (*LabelPair) ProtoMessage() {}
+
+func (x *LabelPair) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_LabelPair proto.InternalMessageInfo
+// Deprecated: Use LabelPair.ProtoReflect.Descriptor instead.
+func (*LabelPair) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{0}
+}
-func (m *LabelPair) GetName() string {
- if m != nil && m.Name != nil {
- return *m.Name
+func (x *LabelPair) GetName() string {
+ if x != nil && x.Name != nil {
+ return *x.Name
}
return ""
}
-func (m *LabelPair) GetValue() string {
- if m != nil && m.Value != nil {
- return *m.Value
+func (x *LabelPair) GetValue() string {
+ if x != nil && x.Value != nil {
+ return *x.Value
}
return ""
}
type Gauge struct {
- Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *Gauge) Reset() { *m = Gauge{} }
-func (m *Gauge) String() string { return proto.CompactTextString(m) }
-func (*Gauge) ProtoMessage() {}
-func (*Gauge) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1e5ddb18987a258, []int{1}
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
}
-func (m *Gauge) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Gauge.Unmarshal(m, b)
-}
-func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Gauge.Marshal(b, m, deterministic)
-}
-func (m *Gauge) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Gauge.Merge(m, src)
+func (x *Gauge) Reset() {
+ *x = Gauge{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Gauge) XXX_Size() int {
- return xxx_messageInfo_Gauge.Size(m)
+
+func (x *Gauge) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Gauge) XXX_DiscardUnknown() {
- xxx_messageInfo_Gauge.DiscardUnknown(m)
+
+func (*Gauge) ProtoMessage() {}
+
+func (x *Gauge) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Gauge proto.InternalMessageInfo
+// Deprecated: Use Gauge.ProtoReflect.Descriptor instead.
+func (*Gauge) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{1}
+}
-func (m *Gauge) GetValue() float64 {
- if m != nil && m.Value != nil {
- return *m.Value
+func (x *Gauge) GetValue() float64 {
+ if x != nil && x.Value != nil {
+ return *x.Value
}
return 0
}
type Counter struct {
- Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
- Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *Counter) Reset() { *m = Counter{} }
-func (m *Counter) String() string { return proto.CompactTextString(m) }
-func (*Counter) ProtoMessage() {}
-func (*Counter) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1e5ddb18987a258, []int{2}
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+ Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"`
+ CreatedTimestamp *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=created_timestamp,json=createdTimestamp" json:"created_timestamp,omitempty"`
}
-func (m *Counter) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Counter.Unmarshal(m, b)
-}
-func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Counter.Marshal(b, m, deterministic)
-}
-func (m *Counter) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Counter.Merge(m, src)
+func (x *Counter) Reset() {
+ *x = Counter{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Counter) XXX_Size() int {
- return xxx_messageInfo_Counter.Size(m)
+
+func (x *Counter) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Counter) XXX_DiscardUnknown() {
- xxx_messageInfo_Counter.DiscardUnknown(m)
+
+func (*Counter) ProtoMessage() {}
+
+func (x *Counter) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Counter proto.InternalMessageInfo
+// Deprecated: Use Counter.ProtoReflect.Descriptor instead.
+func (*Counter) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{2}
+}
-func (m *Counter) GetValue() float64 {
- if m != nil && m.Value != nil {
- return *m.Value
+func (x *Counter) GetValue() float64 {
+ if x != nil && x.Value != nil {
+ return *x.Value
}
return 0
}
-func (m *Counter) GetExemplar() *Exemplar {
- if m != nil {
- return m.Exemplar
+func (x *Counter) GetExemplar() *Exemplar {
+ if x != nil {
+ return x.Exemplar
}
return nil
}
-type Quantile struct {
- Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"`
- Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+func (x *Counter) GetCreatedTimestamp() *timestamppb.Timestamp {
+ if x != nil {
+ return x.CreatedTimestamp
+ }
+ return nil
}
-func (m *Quantile) Reset() { *m = Quantile{} }
-func (m *Quantile) String() string { return proto.CompactTextString(m) }
-func (*Quantile) ProtoMessage() {}
-func (*Quantile) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1e5ddb18987a258, []int{3}
-}
+type Quantile struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *Quantile) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Quantile.Unmarshal(m, b)
-}
-func (m *Quantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Quantile.Marshal(b, m, deterministic)
+ Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"`
+ Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
}
-func (m *Quantile) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Quantile.Merge(m, src)
+
+func (x *Quantile) Reset() {
+ *x = Quantile{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Quantile) XXX_Size() int {
- return xxx_messageInfo_Quantile.Size(m)
+
+func (x *Quantile) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Quantile) XXX_DiscardUnknown() {
- xxx_messageInfo_Quantile.DiscardUnknown(m)
+
+func (*Quantile) ProtoMessage() {}
+
+func (x *Quantile) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Quantile proto.InternalMessageInfo
+// Deprecated: Use Quantile.ProtoReflect.Descriptor instead.
+func (*Quantile) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{3}
+}
-func (m *Quantile) GetQuantile() float64 {
- if m != nil && m.Quantile != nil {
- return *m.Quantile
+func (x *Quantile) GetQuantile() float64 {
+ if x != nil && x.Quantile != nil {
+ return *x.Quantile
}
return 0
}
-func (m *Quantile) GetValue() float64 {
- if m != nil && m.Value != nil {
- return *m.Value
+func (x *Quantile) GetValue() float64 {
+ if x != nil && x.Value != nil {
+ return *x.Value
}
return 0
}
type Summary struct {
- SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
- SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
- Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
+ SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
+ Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"`
+ CreatedTimestamp *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=created_timestamp,json=createdTimestamp" json:"created_timestamp,omitempty"`
+}
+
+func (x *Summary) Reset() {
+ *x = Summary{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Summary) Reset() { *m = Summary{} }
-func (m *Summary) String() string { return proto.CompactTextString(m) }
-func (*Summary) ProtoMessage() {}
-func (*Summary) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1e5ddb18987a258, []int{4}
+func (x *Summary) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Summary) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Summary.Unmarshal(m, b)
-}
-func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Summary.Marshal(b, m, deterministic)
-}
-func (m *Summary) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Summary.Merge(m, src)
-}
-func (m *Summary) XXX_Size() int {
- return xxx_messageInfo_Summary.Size(m)
-}
-func (m *Summary) XXX_DiscardUnknown() {
- xxx_messageInfo_Summary.DiscardUnknown(m)
+func (*Summary) ProtoMessage() {}
+
+func (x *Summary) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Summary proto.InternalMessageInfo
+// Deprecated: Use Summary.ProtoReflect.Descriptor instead.
+func (*Summary) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{4}
+}
-func (m *Summary) GetSampleCount() uint64 {
- if m != nil && m.SampleCount != nil {
- return *m.SampleCount
+func (x *Summary) GetSampleCount() uint64 {
+ if x != nil && x.SampleCount != nil {
+ return *x.SampleCount
}
return 0
}
-func (m *Summary) GetSampleSum() float64 {
- if m != nil && m.SampleSum != nil {
- return *m.SampleSum
+func (x *Summary) GetSampleSum() float64 {
+ if x != nil && x.SampleSum != nil {
+ return *x.SampleSum
}
return 0
}
-func (m *Summary) GetQuantile() []*Quantile {
- if m != nil {
- return m.Quantile
+func (x *Summary) GetQuantile() []*Quantile {
+ if x != nil {
+ return x.Quantile
}
return nil
}
-type Untyped struct {
- Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+func (x *Summary) GetCreatedTimestamp() *timestamppb.Timestamp {
+ if x != nil {
+ return x.CreatedTimestamp
+ }
+ return nil
}
-func (m *Untyped) Reset() { *m = Untyped{} }
-func (m *Untyped) String() string { return proto.CompactTextString(m) }
-func (*Untyped) ProtoMessage() {}
-func (*Untyped) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1e5ddb18987a258, []int{5}
-}
+type Untyped struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *Untyped) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Untyped.Unmarshal(m, b)
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
}
-func (m *Untyped) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Untyped.Marshal(b, m, deterministic)
-}
-func (m *Untyped) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Untyped.Merge(m, src)
+
+func (x *Untyped) Reset() {
+ *x = Untyped{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Untyped) XXX_Size() int {
- return xxx_messageInfo_Untyped.Size(m)
+
+func (x *Untyped) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Untyped) XXX_DiscardUnknown() {
- xxx_messageInfo_Untyped.DiscardUnknown(m)
+
+func (*Untyped) ProtoMessage() {}
+
+func (x *Untyped) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Untyped proto.InternalMessageInfo
+// Deprecated: Use Untyped.ProtoReflect.Descriptor instead.
+func (*Untyped) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{5}
+}
-func (m *Untyped) GetValue() float64 {
- if m != nil && m.Value != nil {
- return *m.Value
+func (x *Untyped) GetValue() float64 {
+ if x != nil && x.Value != nil {
+ return *x.Value
}
return 0
}
type Histogram struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
- SampleCountFloat *float64 `protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat" json:"sample_count_float,omitempty"`
+ SampleCountFloat *float64 `protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat" json:"sample_count_float,omitempty"` // Overrides sample_count if > 0.
SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
// Buckets for the conventional histogram.
- Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"`
+ Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` // Ordered in increasing order of upper_bound, +Inf bucket is optional.
+ CreatedTimestamp *timestamppb.Timestamp `protobuf:"bytes,15,opt,name=created_timestamp,json=createdTimestamp" json:"created_timestamp,omitempty"`
// schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8.
// They are all for base-2 bucket schemas, where 1 is a bucket boundary in each case, and
// then each power of two is divided into 2^n logarithmic buckets.
// Or in other words, each bucket boundary is the previous boundary times 2^(2^-n).
// In the future, more bucket schemas may be added using numbers < -4 or > 8.
Schema *int32 `protobuf:"zigzag32,5,opt,name=schema" json:"schema,omitempty"`
- ZeroThreshold *float64 `protobuf:"fixed64,6,opt,name=zero_threshold,json=zeroThreshold" json:"zero_threshold,omitempty"`
- ZeroCount *uint64 `protobuf:"varint,7,opt,name=zero_count,json=zeroCount" json:"zero_count,omitempty"`
- ZeroCountFloat *float64 `protobuf:"fixed64,8,opt,name=zero_count_float,json=zeroCountFloat" json:"zero_count_float,omitempty"`
+ ZeroThreshold *float64 `protobuf:"fixed64,6,opt,name=zero_threshold,json=zeroThreshold" json:"zero_threshold,omitempty"` // Breadth of the zero bucket.
+ ZeroCount *uint64 `protobuf:"varint,7,opt,name=zero_count,json=zeroCount" json:"zero_count,omitempty"` // Count in zero bucket.
+ ZeroCountFloat *float64 `protobuf:"fixed64,8,opt,name=zero_count_float,json=zeroCountFloat" json:"zero_count_float,omitempty"` // Overrides sb_zero_count if > 0.
// Negative buckets for the native histogram.
NegativeSpan []*BucketSpan `protobuf:"bytes,9,rep,name=negative_span,json=negativeSpan" json:"negative_span,omitempty"`
// Use either "negative_delta" or "negative_count", the former for
// regular histograms with integer counts, the latter for float
// histograms.
- NegativeDelta []int64 `protobuf:"zigzag64,10,rep,name=negative_delta,json=negativeDelta" json:"negative_delta,omitempty"`
- NegativeCount []float64 `protobuf:"fixed64,11,rep,name=negative_count,json=negativeCount" json:"negative_count,omitempty"`
+ NegativeDelta []int64 `protobuf:"zigzag64,10,rep,name=negative_delta,json=negativeDelta" json:"negative_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
+ NegativeCount []float64 `protobuf:"fixed64,11,rep,name=negative_count,json=negativeCount" json:"negative_count,omitempty"` // Absolute count of each bucket.
// Positive buckets for the native histogram.
+ // Use a no-op span (offset 0, length 0) for a native histogram without any
+ // observations yet and with a zero_threshold of 0. Otherwise, it would be
+ // indistinguishable from a classic histogram.
PositiveSpan []*BucketSpan `protobuf:"bytes,12,rep,name=positive_span,json=positiveSpan" json:"positive_span,omitempty"`
// Use either "positive_delta" or "positive_count", the former for
// regular histograms with integer counts, the latter for float
// histograms.
- PositiveDelta []int64 `protobuf:"zigzag64,13,rep,name=positive_delta,json=positiveDelta" json:"positive_delta,omitempty"`
- PositiveCount []float64 `protobuf:"fixed64,14,rep,name=positive_count,json=positiveCount" json:"positive_count,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ PositiveDelta []int64 `protobuf:"zigzag64,13,rep,name=positive_delta,json=positiveDelta" json:"positive_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
+ PositiveCount []float64 `protobuf:"fixed64,14,rep,name=positive_count,json=positiveCount" json:"positive_count,omitempty"` // Absolute count of each bucket.
+ // Only used for native histograms. These exemplars MUST have a timestamp.
+ Exemplars []*Exemplar `protobuf:"bytes,16,rep,name=exemplars" json:"exemplars,omitempty"`
+}
+
+func (x *Histogram) Reset() {
+ *x = Histogram{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Histogram) Reset() { *m = Histogram{} }
-func (m *Histogram) String() string { return proto.CompactTextString(m) }
-func (*Histogram) ProtoMessage() {}
-func (*Histogram) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1e5ddb18987a258, []int{6}
+func (x *Histogram) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Histogram) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Histogram.Unmarshal(m, b)
-}
-func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Histogram.Marshal(b, m, deterministic)
-}
-func (m *Histogram) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Histogram.Merge(m, src)
-}
-func (m *Histogram) XXX_Size() int {
- return xxx_messageInfo_Histogram.Size(m)
-}
-func (m *Histogram) XXX_DiscardUnknown() {
- xxx_messageInfo_Histogram.DiscardUnknown(m)
+func (*Histogram) ProtoMessage() {}
+
+func (x *Histogram) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Histogram proto.InternalMessageInfo
+// Deprecated: Use Histogram.ProtoReflect.Descriptor instead.
+func (*Histogram) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{6}
+}
-func (m *Histogram) GetSampleCount() uint64 {
- if m != nil && m.SampleCount != nil {
- return *m.SampleCount
+func (x *Histogram) GetSampleCount() uint64 {
+ if x != nil && x.SampleCount != nil {
+ return *x.SampleCount
}
return 0
}
-func (m *Histogram) GetSampleCountFloat() float64 {
- if m != nil && m.SampleCountFloat != nil {
- return *m.SampleCountFloat
+func (x *Histogram) GetSampleCountFloat() float64 {
+ if x != nil && x.SampleCountFloat != nil {
+ return *x.SampleCountFloat
}
return 0
}
-func (m *Histogram) GetSampleSum() float64 {
- if m != nil && m.SampleSum != nil {
- return *m.SampleSum
+func (x *Histogram) GetSampleSum() float64 {
+ if x != nil && x.SampleSum != nil {
+ return *x.SampleSum
}
return 0
}
-func (m *Histogram) GetBucket() []*Bucket {
- if m != nil {
- return m.Bucket
+func (x *Histogram) GetBucket() []*Bucket {
+ if x != nil {
+ return x.Bucket
+ }
+ return nil
+}
+
+func (x *Histogram) GetCreatedTimestamp() *timestamppb.Timestamp {
+ if x != nil {
+ return x.CreatedTimestamp
}
return nil
}
-func (m *Histogram) GetSchema() int32 {
- if m != nil && m.Schema != nil {
- return *m.Schema
+func (x *Histogram) GetSchema() int32 {
+ if x != nil && x.Schema != nil {
+ return *x.Schema
}
return 0
}
-func (m *Histogram) GetZeroThreshold() float64 {
- if m != nil && m.ZeroThreshold != nil {
- return *m.ZeroThreshold
+func (x *Histogram) GetZeroThreshold() float64 {
+ if x != nil && x.ZeroThreshold != nil {
+ return *x.ZeroThreshold
}
return 0
}
-func (m *Histogram) GetZeroCount() uint64 {
- if m != nil && m.ZeroCount != nil {
- return *m.ZeroCount
+func (x *Histogram) GetZeroCount() uint64 {
+ if x != nil && x.ZeroCount != nil {
+ return *x.ZeroCount
}
return 0
}
-func (m *Histogram) GetZeroCountFloat() float64 {
- if m != nil && m.ZeroCountFloat != nil {
- return *m.ZeroCountFloat
+func (x *Histogram) GetZeroCountFloat() float64 {
+ if x != nil && x.ZeroCountFloat != nil {
+ return *x.ZeroCountFloat
}
return 0
}
-func (m *Histogram) GetNegativeSpan() []*BucketSpan {
- if m != nil {
- return m.NegativeSpan
+func (x *Histogram) GetNegativeSpan() []*BucketSpan {
+ if x != nil {
+ return x.NegativeSpan
+ }
+ return nil
+}
+
+func (x *Histogram) GetNegativeDelta() []int64 {
+ if x != nil {
+ return x.NegativeDelta
}
return nil
}
-func (m *Histogram) GetNegativeDelta() []int64 {
- if m != nil {
- return m.NegativeDelta
+func (x *Histogram) GetNegativeCount() []float64 {
+ if x != nil {
+ return x.NegativeCount
}
return nil
}
-func (m *Histogram) GetNegativeCount() []float64 {
- if m != nil {
- return m.NegativeCount
+func (x *Histogram) GetPositiveSpan() []*BucketSpan {
+ if x != nil {
+ return x.PositiveSpan
}
return nil
}
-func (m *Histogram) GetPositiveSpan() []*BucketSpan {
- if m != nil {
- return m.PositiveSpan
+func (x *Histogram) GetPositiveDelta() []int64 {
+ if x != nil {
+ return x.PositiveDelta
}
return nil
}
-func (m *Histogram) GetPositiveDelta() []int64 {
- if m != nil {
- return m.PositiveDelta
+func (x *Histogram) GetPositiveCount() []float64 {
+ if x != nil {
+ return x.PositiveCount
}
return nil
}
-func (m *Histogram) GetPositiveCount() []float64 {
- if m != nil {
- return m.PositiveCount
+func (x *Histogram) GetExemplars() []*Exemplar {
+ if x != nil {
+ return x.Exemplars
}
return nil
}
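
The regenerated Histogram above carries the native-histogram encoding: bucket positions come from walking the spans, absolute counts come from summing the deltas, and boundaries follow from the schema via base = 2^(2^-schema), so bucket index i covers (base^(i-1), base^i]. A minimal sketch of that walk for the positive buckets follows; it is illustrative only, not part of the vendored file, assumes a well-formed integer histogram (the delta count matching the total span length), and needs only the standard-library math package:

func positiveBuckets(h *Histogram) (uppers []float64, counts []uint64) {
	// base is the growth factor between consecutive bucket boundaries.
	base := math.Pow(2, math.Pow(2, -float64(h.GetSchema())))
	deltas := h.GetPositiveDelta()
	var (
		idx   int32 // bucket index on the schema grid
		count int64 // running absolute count, rebuilt from the deltas
		d     int   // cursor into the flat delta slice
	)
	for _, span := range h.GetPositiveSpan() {
		// Offset is the starting index for the first span, a gap afterwards.
		idx += span.GetOffset()
		for j := uint32(0); j < span.GetLength(); j++ {
			count += deltas[d]
			d++
			uppers = append(uppers, math.Pow(base, float64(idx))) // upper bound of bucket idx
			counts = append(counts, uint64(count))
			idx++
		}
	}
	return uppers, counts
}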
@@ -513,64 +634,72 @@ func (m *Histogram) GetPositiveCount() []float64 {
// A Bucket of a conventional histogram, each of which is treated as
// an individual counter-like time series by Prometheus.
type Bucket struct {
- CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"`
- CumulativeCountFloat *float64 `protobuf:"fixed64,4,opt,name=cumulative_count_float,json=cumulativeCountFloat" json:"cumulative_count_float,omitempty"`
- UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"`
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"` // Cumulative in increasing order.
+ CumulativeCountFloat *float64 `protobuf:"fixed64,4,opt,name=cumulative_count_float,json=cumulativeCountFloat" json:"cumulative_count_float,omitempty"` // Overrides cumulative_count if > 0.
+ UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"` // Inclusive.
Exemplar *Exemplar `protobuf:"bytes,3,opt,name=exemplar" json:"exemplar,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
}
-func (m *Bucket) Reset() { *m = Bucket{} }
-func (m *Bucket) String() string { return proto.CompactTextString(m) }
-func (*Bucket) ProtoMessage() {}
-func (*Bucket) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1e5ddb18987a258, []int{7}
+func (x *Bucket) Reset() {
+ *x = Bucket{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Bucket) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Bucket.Unmarshal(m, b)
-}
-func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Bucket.Marshal(b, m, deterministic)
+func (x *Bucket) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Bucket) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Bucket.Merge(m, src)
-}
-func (m *Bucket) XXX_Size() int {
- return xxx_messageInfo_Bucket.Size(m)
-}
-func (m *Bucket) XXX_DiscardUnknown() {
- xxx_messageInfo_Bucket.DiscardUnknown(m)
+
+func (*Bucket) ProtoMessage() {}
+
+func (x *Bucket) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Bucket proto.InternalMessageInfo
+// Deprecated: Use Bucket.ProtoReflect.Descriptor instead.
+func (*Bucket) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{7}
+}
-func (m *Bucket) GetCumulativeCount() uint64 {
- if m != nil && m.CumulativeCount != nil {
- return *m.CumulativeCount
+func (x *Bucket) GetCumulativeCount() uint64 {
+ if x != nil && x.CumulativeCount != nil {
+ return *x.CumulativeCount
}
return 0
}
-func (m *Bucket) GetCumulativeCountFloat() float64 {
- if m != nil && m.CumulativeCountFloat != nil {
- return *m.CumulativeCountFloat
+func (x *Bucket) GetCumulativeCountFloat() float64 {
+ if x != nil && x.CumulativeCountFloat != nil {
+ return *x.CumulativeCountFloat
}
return 0
}
-func (m *Bucket) GetUpperBound() float64 {
- if m != nil && m.UpperBound != nil {
- return *m.UpperBound
+func (x *Bucket) GetUpperBound() float64 {
+ if x != nil && x.UpperBound != nil {
+ return *x.UpperBound
}
return 0
}
-func (m *Bucket) GetExemplar() *Exemplar {
- if m != nil {
- return m.Exemplar
+func (x *Bucket) GetExemplar() *Exemplar {
+ if x != nil {
+ return x.Exemplar
}
return nil
}
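
Classic-histogram buckets, by contrast, are cumulative: CumulativeCount at one UpperBound includes every lower bucket, so the observations that landed in a single bucket are the difference between adjacent cumulative counts. A small sketch, illustrative only, assuming the buckets arrive ordered by increasing UpperBound as the field comment requires:

func perBucketCounts(buckets []*Bucket) []uint64 {
	out := make([]uint64, 0, len(buckets))
	var prev uint64
	for _, b := range buckets {
		c := b.GetCumulativeCount()
		out = append(out, c-prev) // observations in exactly this bucket
		prev = c
	}
	return out
}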
@@ -582,333 +711,689 @@ func (m *Bucket) GetExemplar() *Exemplar {
// structured here (with all the buckets in a single array separate
// from the Spans).
type BucketSpan struct {
- Offset *int32 `protobuf:"zigzag32,1,opt,name=offset" json:"offset,omitempty"`
- Length *uint32 `protobuf:"varint,2,opt,name=length" json:"length,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *BucketSpan) Reset() { *m = BucketSpan{} }
-func (m *BucketSpan) String() string { return proto.CompactTextString(m) }
-func (*BucketSpan) ProtoMessage() {}
-func (*BucketSpan) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1e5ddb18987a258, []int{8}
+ Offset *int32 `protobuf:"zigzag32,1,opt,name=offset" json:"offset,omitempty"` // Gap to previous span, or starting point for 1st span (which can be negative).
+ Length *uint32 `protobuf:"varint,2,opt,name=length" json:"length,omitempty"` // Length of consecutive buckets.
}
-func (m *BucketSpan) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_BucketSpan.Unmarshal(m, b)
-}
-func (m *BucketSpan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_BucketSpan.Marshal(b, m, deterministic)
-}
-func (m *BucketSpan) XXX_Merge(src proto.Message) {
- xxx_messageInfo_BucketSpan.Merge(m, src)
+func (x *BucketSpan) Reset() {
+ *x = BucketSpan{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *BucketSpan) XXX_Size() int {
- return xxx_messageInfo_BucketSpan.Size(m)
+
+func (x *BucketSpan) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *BucketSpan) XXX_DiscardUnknown() {
- xxx_messageInfo_BucketSpan.DiscardUnknown(m)
+
+func (*BucketSpan) ProtoMessage() {}
+
+func (x *BucketSpan) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_BucketSpan proto.InternalMessageInfo
+// Deprecated: Use BucketSpan.ProtoReflect.Descriptor instead.
+func (*BucketSpan) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{8}
+}
-func (m *BucketSpan) GetOffset() int32 {
- if m != nil && m.Offset != nil {
- return *m.Offset
+func (x *BucketSpan) GetOffset() int32 {
+ if x != nil && x.Offset != nil {
+ return *x.Offset
}
return 0
}
-func (m *BucketSpan) GetLength() uint32 {
- if m != nil && m.Length != nil {
- return *m.Length
+func (x *BucketSpan) GetLength() uint32 {
+ if x != nil && x.Length != nil {
+ return *x.Length
}
return 0
}
type Exemplar struct {
- Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
- Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
- Timestamp *timestamp.Timestamp `protobuf:"bytes,3,opt,name=timestamp" json:"timestamp,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (m *Exemplar) Reset() { *m = Exemplar{} }
-func (m *Exemplar) String() string { return proto.CompactTextString(m) }
-func (*Exemplar) ProtoMessage() {}
-func (*Exemplar) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1e5ddb18987a258, []int{9}
+ Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
+ Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
+ Timestamp *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=timestamp" json:"timestamp,omitempty"` // OpenMetrics-style.
}
-func (m *Exemplar) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Exemplar.Unmarshal(m, b)
-}
-func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic)
-}
-func (m *Exemplar) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Exemplar.Merge(m, src)
+func (x *Exemplar) Reset() {
+ *x = Exemplar{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Exemplar) XXX_Size() int {
- return xxx_messageInfo_Exemplar.Size(m)
+
+func (x *Exemplar) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Exemplar) XXX_DiscardUnknown() {
- xxx_messageInfo_Exemplar.DiscardUnknown(m)
+
+func (*Exemplar) ProtoMessage() {}
+
+func (x *Exemplar) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Exemplar proto.InternalMessageInfo
+// Deprecated: Use Exemplar.ProtoReflect.Descriptor instead.
+func (*Exemplar) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{9}
+}
-func (m *Exemplar) GetLabel() []*LabelPair {
- if m != nil {
- return m.Label
+func (x *Exemplar) GetLabel() []*LabelPair {
+ if x != nil {
+ return x.Label
}
return nil
}
-func (m *Exemplar) GetValue() float64 {
- if m != nil && m.Value != nil {
- return *m.Value
+func (x *Exemplar) GetValue() float64 {
+ if x != nil && x.Value != nil {
+ return *x.Value
}
return 0
}
-func (m *Exemplar) GetTimestamp() *timestamp.Timestamp {
- if m != nil {
- return m.Timestamp
+func (x *Exemplar) GetTimestamp() *timestamppb.Timestamp {
+ if x != nil {
+ return x.Timestamp
}
return nil
}
type Metric struct {
- Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
- Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"`
- Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"`
- Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"`
- Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"`
- Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"`
- TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Metric) Reset() { *m = Metric{} }
-func (m *Metric) String() string { return proto.CompactTextString(m) }
-func (*Metric) ProtoMessage() {}
-func (*Metric) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1e5ddb18987a258, []int{10}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
+ Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"`
+ Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"`
+ Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"`
+ Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"`
+ Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"`
+ TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"`
+}
+
+func (x *Metric) Reset() {
+ *x = Metric{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *Metric) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Metric.Unmarshal(m, b)
+func (x *Metric) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Metric.Marshal(b, m, deterministic)
-}
-func (m *Metric) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Metric.Merge(m, src)
-}
-func (m *Metric) XXX_Size() int {
- return xxx_messageInfo_Metric.Size(m)
-}
-func (m *Metric) XXX_DiscardUnknown() {
- xxx_messageInfo_Metric.DiscardUnknown(m)
+
+func (*Metric) ProtoMessage() {}
+
+func (x *Metric) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_Metric proto.InternalMessageInfo
+// Deprecated: Use Metric.ProtoReflect.Descriptor instead.
+func (*Metric) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{10}
+}
-func (m *Metric) GetLabel() []*LabelPair {
- if m != nil {
- return m.Label
+func (x *Metric) GetLabel() []*LabelPair {
+ if x != nil {
+ return x.Label
}
return nil
}
-func (m *Metric) GetGauge() *Gauge {
- if m != nil {
- return m.Gauge
+func (x *Metric) GetGauge() *Gauge {
+ if x != nil {
+ return x.Gauge
}
return nil
}
-func (m *Metric) GetCounter() *Counter {
- if m != nil {
- return m.Counter
+func (x *Metric) GetCounter() *Counter {
+ if x != nil {
+ return x.Counter
}
return nil
}
-func (m *Metric) GetSummary() *Summary {
- if m != nil {
- return m.Summary
+func (x *Metric) GetSummary() *Summary {
+ if x != nil {
+ return x.Summary
}
return nil
}
-func (m *Metric) GetUntyped() *Untyped {
- if m != nil {
- return m.Untyped
+func (x *Metric) GetUntyped() *Untyped {
+ if x != nil {
+ return x.Untyped
}
return nil
}
-func (m *Metric) GetHistogram() *Histogram {
- if m != nil {
- return m.Histogram
+func (x *Metric) GetHistogram() *Histogram {
+ if x != nil {
+ return x.Histogram
}
return nil
}
-func (m *Metric) GetTimestampMs() int64 {
- if m != nil && m.TimestampMs != nil {
- return *m.TimestampMs
+func (x *Metric) GetTimestampMs() int64 {
+ if x != nil && x.TimestampMs != nil {
+ return *x.TimestampMs
}
return 0
}
type MetricFamily struct {
- Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
- Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"`
- Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"`
- Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *MetricFamily) Reset() { *m = MetricFamily{} }
-func (m *MetricFamily) String() string { return proto.CompactTextString(m) }
-func (*MetricFamily) ProtoMessage() {}
-func (*MetricFamily) Descriptor() ([]byte, []int) {
- return fileDescriptor_d1e5ddb18987a258, []int{11}
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"`
+ Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"`
+ Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"`
+ Unit *string `protobuf:"bytes,5,opt,name=unit" json:"unit,omitempty"`
+}
+
+func (x *MetricFamily) Reset() {
+ *x = MetricFamily{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
}
-func (m *MetricFamily) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_MetricFamily.Unmarshal(m, b)
-}
-func (m *MetricFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_MetricFamily.Marshal(b, m, deterministic)
+func (x *MetricFamily) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (m *MetricFamily) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MetricFamily.Merge(m, src)
-}
-func (m *MetricFamily) XXX_Size() int {
- return xxx_messageInfo_MetricFamily.Size(m)
-}
-func (m *MetricFamily) XXX_DiscardUnknown() {
- xxx_messageInfo_MetricFamily.DiscardUnknown(m)
+
+func (*MetricFamily) ProtoMessage() {}
+
+func (x *MetricFamily) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
}
-var xxx_messageInfo_MetricFamily proto.InternalMessageInfo
+// Deprecated: Use MetricFamily.ProtoReflect.Descriptor instead.
+func (*MetricFamily) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{11}
+}
-func (m *MetricFamily) GetName() string {
- if m != nil && m.Name != nil {
- return *m.Name
+func (x *MetricFamily) GetName() string {
+ if x != nil && x.Name != nil {
+ return *x.Name
}
return ""
}
-func (m *MetricFamily) GetHelp() string {
- if m != nil && m.Help != nil {
- return *m.Help
+func (x *MetricFamily) GetHelp() string {
+ if x != nil && x.Help != nil {
+ return *x.Help
}
return ""
}
-func (m *MetricFamily) GetType() MetricType {
- if m != nil && m.Type != nil {
- return *m.Type
+func (x *MetricFamily) GetType() MetricType {
+ if x != nil && x.Type != nil {
+ return *x.Type
}
return MetricType_COUNTER
}
-func (m *MetricFamily) GetMetric() []*Metric {
- if m != nil {
- return m.Metric
+func (x *MetricFamily) GetMetric() []*Metric {
+ if x != nil {
+ return x.Metric
}
return nil
}
-func init() {
- proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value)
- proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair")
- proto.RegisterType((*Gauge)(nil), "io.prometheus.client.Gauge")
- proto.RegisterType((*Counter)(nil), "io.prometheus.client.Counter")
- proto.RegisterType((*Quantile)(nil), "io.prometheus.client.Quantile")
- proto.RegisterType((*Summary)(nil), "io.prometheus.client.Summary")
- proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped")
- proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram")
- proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket")
- proto.RegisterType((*BucketSpan)(nil), "io.prometheus.client.BucketSpan")
- proto.RegisterType((*Exemplar)(nil), "io.prometheus.client.Exemplar")
- proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric")
- proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily")
-}
-
-func init() {
- proto.RegisterFile("io/prometheus/client/metrics.proto", fileDescriptor_d1e5ddb18987a258)
-}
-
-var fileDescriptor_d1e5ddb18987a258 = []byte{
- // 896 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0xdd, 0x8e, 0xdb, 0x44,
- 0x18, 0xc5, 0x9b, 0x5f, 0x7f, 0xd9, 0x6c, 0xd3, 0x61, 0x55, 0x59, 0x0b, 0xcb, 0x06, 0x4b, 0x48,
- 0x0b, 0x42, 0x8e, 0x40, 0x5b, 0x81, 0x0a, 0x5c, 0xec, 0xb6, 0xe9, 0x16, 0x89, 0xb4, 0x65, 0x92,
- 0x5c, 0x14, 0x2e, 0xac, 0x49, 0x32, 0xeb, 0x58, 0x78, 0x3c, 0xc6, 0x1e, 0x57, 0x2c, 0x2f, 0xc0,
- 0x35, 0xaf, 0xc0, 0xc3, 0xf0, 0x22, 0x3c, 0x08, 0x68, 0xfe, 0xec, 0xdd, 0xe2, 0x94, 0xd2, 0x3b,
- 0x7f, 0x67, 0xce, 0xf7, 0xcd, 0x39, 0xe3, 0xc9, 0x71, 0xc0, 0x8f, 0xf9, 0x24, 0xcb, 0x39, 0xa3,
- 0x62, 0x4b, 0xcb, 0x62, 0xb2, 0x4e, 0x62, 0x9a, 0x8a, 0x09, 0xa3, 0x22, 0x8f, 0xd7, 0x45, 0x90,
- 0xe5, 0x5c, 0x70, 0x74, 0x18, 0xf3, 0xa0, 0xe6, 0x04, 0x9a, 0x73, 0x74, 0x12, 0x71, 0x1e, 0x25,
- 0x74, 0xa2, 0x38, 0xab, 0xf2, 0x6a, 0x22, 0x62, 0x46, 0x0b, 0x41, 0x58, 0xa6, 0xdb, 0xfc, 0xfb,
- 0xe0, 0x7e, 0x47, 0x56, 0x34, 0x79, 0x4e, 0xe2, 0x1c, 0x21, 0x68, 0xa7, 0x84, 0x51, 0xcf, 0x19,
- 0x3b, 0xa7, 0x2e, 0x56, 0xcf, 0xe8, 0x10, 0x3a, 0x2f, 0x49, 0x52, 0x52, 0x6f, 0x4f, 0x81, 0xba,
- 0xf0, 0x8f, 0xa1, 0x73, 0x49, 0xca, 0xe8, 0xc6, 0xb2, 0xec, 0x71, 0xec, 0xf2, 0x8f, 0xd0, 0x7b,
- 0xc8, 0xcb, 0x54, 0xd0, 0xbc, 0x99, 0x80, 0x1e, 0x40, 0x9f, 0xfe, 0x42, 0x59, 0x96, 0x90, 0x5c,
- 0x0d, 0x1e, 0x7c, 0xfe, 0x41, 0xd0, 0x64, 0x20, 0x98, 0x1a, 0x16, 0xae, 0xf8, 0xfe, 0xd7, 0xd0,
- 0xff, 0xbe, 0x24, 0xa9, 0x88, 0x13, 0x8a, 0x8e, 0xa0, 0xff, 0xb3, 0x79, 0x36, 0x1b, 0x54, 0xf5,
- 0x6d, 0xe5, 0x95, 0xb4, 0xdf, 0x1c, 0xe8, 0xcd, 0x4b, 0xc6, 0x48, 0x7e, 0x8d, 0x3e, 0x84, 0xfd,
- 0x82, 0xb0, 0x2c, 0xa1, 0xe1, 0x5a, 0xaa, 0x55, 0x13, 0xda, 0x78, 0xa0, 0x31, 0x65, 0x00, 0x1d,
- 0x03, 0x18, 0x4a, 0x51, 0x32, 0x33, 0xc9, 0xd5, 0xc8, 0xbc, 0x64, 0xd2, 0x47, 0xb5, 0x7f, 0x6b,
- 0xdc, 0xda, 0xed, 0xc3, 0x2a, 0xae, 0xf5, 0xf9, 0x27, 0xd0, 0x5b, 0xa6, 0xe2, 0x3a, 0xa3, 0x9b,
- 0x1d, 0xa7, 0xf8, 0x57, 0x1b, 0xdc, 0x27, 0x71, 0x21, 0x78, 0x94, 0x13, 0xf6, 0x26, 0x62, 0x3f,
- 0x05, 0x74, 0x93, 0x12, 0x5e, 0x25, 0x9c, 0x08, 0xaf, 0xad, 0x66, 0x8e, 0x6e, 0x10, 0x1f, 0x4b,
- 0xfc, 0xbf, 0xac, 0x9d, 0x41, 0x77, 0x55, 0xae, 0x7f, 0xa2, 0xc2, 0x18, 0x7b, 0xbf, 0xd9, 0xd8,
- 0x85, 0xe2, 0x60, 0xc3, 0x45, 0xf7, 0xa0, 0x5b, 0xac, 0xb7, 0x94, 0x11, 0xaf, 0x33, 0x76, 0x4e,
- 0xef, 0x62, 0x53, 0xa1, 0x8f, 0xe0, 0xe0, 0x57, 0x9a, 0xf3, 0x50, 0x6c, 0x73, 0x5a, 0x6c, 0x79,
- 0xb2, 0xf1, 0xba, 0x6a, 0xc3, 0xa1, 0x44, 0x17, 0x16, 0x94, 0x9a, 0x14, 0x4d, 0x5b, 0xec, 0x29,
- 0x8b, 0xae, 0x44, 0xb4, 0xc1, 0x53, 0x18, 0xd5, 0xcb, 0xc6, 0x5e, 0x5f, 0xcd, 0x39, 0xa8, 0x48,
- 0xda, 0xdc, 0x14, 0x86, 0x29, 0x8d, 0x88, 0x88, 0x5f, 0xd2, 0xb0, 0xc8, 0x48, 0xea, 0xb9, 0xca,
- 0xc4, 0xf8, 0x75, 0x26, 0xe6, 0x19, 0x49, 0xf1, 0xbe, 0x6d, 0x93, 0x95, 0x94, 0x5d, 0x8d, 0xd9,
- 0xd0, 0x44, 0x10, 0x0f, 0xc6, 0xad, 0x53, 0x84, 0xab, 0xe1, 0x8f, 0x24, 0x78, 0x8b, 0xa6, 0xa5,
- 0x0f, 0xc6, 0x2d, 0xe9, 0xce, 0xa2, 0x5a, 0xfe, 0x14, 0x86, 0x19, 0x2f, 0xe2, 0x5a, 0xd4, 0xfe,
- 0x9b, 0x8a, 0xb2, 0x6d, 0x56, 0x54, 0x35, 0x46, 0x8b, 0x1a, 0x6a, 0x51, 0x16, 0xad, 0x44, 0x55,
- 0x34, 0x2d, 0xea, 0x40, 0x8b, 0xb2, 0xa8, 0x12, 0xe5, 0xff, 0xe9, 0x40, 0x57, 0x6f, 0x85, 0x3e,
- 0x86, 0xd1, 0xba, 0x64, 0x65, 0x72, 0xd3, 0x88, 0xbe, 0x66, 0x77, 0x6a, 0x5c, 0x5b, 0x39, 0x83,
- 0x7b, 0xaf, 0x52, 0x6f, 0x5d, 0xb7, 0xc3, 0x57, 0x1a, 0xf4, 0x5b, 0x39, 0x81, 0x41, 0x99, 0x65,
- 0x34, 0x0f, 0x57, 0xbc, 0x4c, 0x37, 0xe6, 0xce, 0x81, 0x82, 0x2e, 0x24, 0x72, 0x2b, 0x17, 0x5a,
- 0xff, 0x3b, 0x17, 0xa0, 0x3e, 0x32, 0x79, 0x11, 0xf9, 0xd5, 0x55, 0x41, 0xb5, 0x83, 0xbb, 0xd8,
- 0x54, 0x12, 0x4f, 0x68, 0x1a, 0x89, 0xad, 0xda, 0x7d, 0x88, 0x4d, 0xe5, 0xff, 0xee, 0x40, 0xdf,
- 0x0e, 0x45, 0xf7, 0xa1, 0x93, 0xc8, 0x54, 0xf4, 0x1c, 0xf5, 0x82, 0x4e, 0x9a, 0x35, 0x54, 0xc1,
- 0x89, 0x35, 0xbb, 0x39, 0x71, 0xd0, 0x97, 0xe0, 0x56, 0xa9, 0x6b, 0x4c, 0x1d, 0x05, 0x3a, 0x97,
- 0x03, 0x9b, 0xcb, 0xc1, 0xc2, 0x32, 0x70, 0x4d, 0xf6, 0xff, 0xde, 0x83, 0xee, 0x4c, 0xa5, 0xfc,
- 0xdb, 0x2a, 0xfa, 0x0c, 0x3a, 0x91, 0xcc, 0x69, 0x13, 0xb2, 0xef, 0x35, 0xb7, 0xa9, 0x28, 0xc7,
- 0x9a, 0x89, 0xbe, 0x80, 0xde, 0x5a, 0x67, 0xb7, 0x11, 0x7b, 0xdc, 0xdc, 0x64, 0x02, 0x1e, 0x5b,
- 0xb6, 0x6c, 0x2c, 0x74, 0xb0, 0xaa, 0x3b, 0xb0, 0xb3, 0xd1, 0xa4, 0x2f, 0xb6, 0x6c, 0xd9, 0x58,
- 0xea, 0x20, 0x54, 0xa1, 0xb1, 0xb3, 0xd1, 0xa4, 0x25, 0xb6, 0x6c, 0xf4, 0x0d, 0xb8, 0x5b, 0x9b,
- 0x8f, 0x2a, 0x2c, 0x76, 0x1e, 0x4c, 0x15, 0xa3, 0xb8, 0xee, 0x90, 0x89, 0x5a, 0x9d, 0x75, 0xc8,
- 0x0a, 0x95, 0x48, 0x2d, 0x3c, 0xa8, 0xb0, 0x59, 0xe1, 0xff, 0xe1, 0xc0, 0xbe, 0x7e, 0x03, 0x8f,
- 0x09, 0x8b, 0x93, 0xeb, 0xc6, 0x4f, 0x24, 0x82, 0xf6, 0x96, 0x26, 0x99, 0xf9, 0x42, 0xaa, 0x67,
- 0x74, 0x06, 0x6d, 0xa9, 0x51, 0x1d, 0xe1, 0xc1, 0xae, 0x5f, 0xb8, 0x9e, 0xbc, 0xb8, 0xce, 0x28,
- 0x56, 0x6c, 0x99, 0xb9, 0xfa, 0xab, 0xee, 0xb5, 0x5f, 0x97, 0xb9, 0xba, 0x0f, 0x1b, 0xee, 0x27,
- 0x2b, 0x80, 0x7a, 0x12, 0x1a, 0x40, 0xef, 0xe1, 0xb3, 0xe5, 0xd3, 0xc5, 0x14, 0x8f, 0xde, 0x41,
- 0x2e, 0x74, 0x2e, 0xcf, 0x97, 0x97, 0xd3, 0x91, 0x23, 0xf1, 0xf9, 0x72, 0x36, 0x3b, 0xc7, 0x2f,
- 0x46, 0x7b, 0xb2, 0x58, 0x3e, 0x5d, 0xbc, 0x78, 0x3e, 0x7d, 0x34, 0x6a, 0xa1, 0x21, 0xb8, 0x4f,
- 0xbe, 0x9d, 0x2f, 0x9e, 0x5d, 0xe2, 0xf3, 0xd9, 0xa8, 0x8d, 0xde, 0x85, 0x3b, 0xaa, 0x27, 0xac,
- 0xc1, 0xce, 0x05, 0x86, 0xc6, 0x3f, 0x18, 0x3f, 0x3c, 0x88, 0x62, 0xb1, 0x2d, 0x57, 0xc1, 0x9a,
- 0xb3, 0x7f, 0xff, 0x45, 0x09, 0x19, 0xdf, 0xd0, 0x64, 0x12, 0xf1, 0xaf, 0x62, 0x1e, 0xd6, 0xab,
- 0xa1, 0x5e, 0xfd, 0x27, 0x00, 0x00, 0xff, 0xff, 0x16, 0x77, 0x81, 0x98, 0xd7, 0x08, 0x00, 0x00,
+func (x *MetricFamily) GetUnit() string {
+ if x != nil && x.Unit != nil {
+ return *x.Unit
+ }
+ return ""
+}
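
Because every scalar field in this generated API is a pointer (proto2 optional semantics), consumers populate messages through the helper constructors in google.golang.org/protobuf/proto. A hedged sketch of filling in the new Unit field from an importing package; the metric name, help text, and unit are invented for illustration:

import (
	dto "github.com/prometheus/client_model/go"
	"google.golang.org/protobuf/proto"
)

func exampleFamily() *dto.MetricFamily {
	return &dto.MetricFamily{
		Name: proto.String("http_request_duration_seconds"),
		Help: proto.String("A histogram of request latencies."),
		Type: dto.MetricType_HISTOGRAM.Enum(),
		Unit: proto.String("seconds"), // field 5, added in this revision
	}
}

The nil-safe getters keep call sites simple: mf.GetUnit() returns "" rather than dereferencing a nil pointer when the field is unset.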
+
+var File_io_prometheus_client_metrics_proto protoreflect.FileDescriptor
+
+var file_io_prometheus_client_metrics_proto_rawDesc = []byte{
+ 0x0a, 0x22, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2f,
+ 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68,
+ 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65,
+ 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x35, 0x0a, 0x09, 0x4c,
+ 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x22, 0x1d, 0x0a, 0x05, 0x47, 0x61, 0x75, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x22, 0xa4, 0x01, 0x0a, 0x07, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x14, 0x0a,
+ 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65,
+ 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65,
+ 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12,
+ 0x47, 0x0a, 0x11, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73,
+ 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
+ 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x54,
+ 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x3c, 0x0a, 0x08, 0x51, 0x75, 0x61, 0x6e,
+ 0x74, 0x69, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65,
+ 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52,
+ 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xd0, 0x01, 0x0a, 0x07, 0x53, 0x75, 0x6d, 0x6d, 0x61,
+ 0x72, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75,
+ 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65,
+ 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f,
+ 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x73, 0x61, 0x6d, 0x70, 0x6c,
+ 0x65, 0x53, 0x75, 0x6d, 0x12, 0x3a, 0x0a, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65,
+ 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d,
+ 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x51, 0x75,
+ 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x52, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65,
+ 0x12, 0x47, 0x0a, 0x11, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65,
+ 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64,
+ 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x1f, 0x0a, 0x07, 0x55, 0x6e, 0x74,
+ 0x79, 0x70, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xea, 0x05, 0x0a, 0x09, 0x48,
+ 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70,
+ 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b,
+ 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x73,
+ 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61,
+ 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x10, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43,
+ 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6d,
+ 0x70, 0x6c, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x73,
+ 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x34, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b,
+ 0x65, 0x74, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72,
+ 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e,
+ 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x47,
+ 0x0a, 0x11, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74,
+ 0x61, 0x6d, 0x70, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65,
+ 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x54, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d,
+ 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x11, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12,
+ 0x25, 0x0a, 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c,
+ 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0d, 0x7a, 0x65, 0x72, 0x6f, 0x54, 0x68, 0x72,
+ 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x63,
+ 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x7a, 0x65, 0x72, 0x6f,
+ 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x63, 0x6f,
+ 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x01, 0x52,
+ 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12,
+ 0x45, 0x0a, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x73, 0x70, 0x61, 0x6e,
+ 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d,
+ 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x42, 0x75,
+ 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x0c, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69,
+ 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69,
+ 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x12, 0x52, 0x0d,
+ 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x12, 0x25, 0x0a,
+ 0x0e, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18,
+ 0x0b, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43,
+ 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x45, 0x0a, 0x0d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65,
+ 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x69, 0x6f,
+ 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65,
+ 0x6e, 0x74, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x0c, 0x70,
+ 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x70,
+ 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x18, 0x0d, 0x20,
+ 0x03, 0x28, 0x12, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c,
+ 0x74, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63,
+ 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69,
+ 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x3c, 0x0a, 0x09, 0x65, 0x78, 0x65,
+ 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69,
+ 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69,
+ 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x09, 0x65, 0x78,
+ 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x22, 0xc6, 0x01, 0x0a, 0x06, 0x42, 0x75, 0x63, 0x6b,
+ 0x65, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65,
+ 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x63, 0x75,
+ 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x34, 0x0a,
+ 0x16, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e,
+ 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x14, 0x63,
+ 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c,
+ 0x6f, 0x61, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x70, 0x70, 0x65, 0x72, 0x5f, 0x62, 0x6f, 0x75,
+ 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x75, 0x70, 0x70, 0x65, 0x72, 0x42,
+ 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d,
+ 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78,
+ 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72,
+ 0x22, 0x3c, 0x0a, 0x0a, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x16,
+ 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x11, 0x52, 0x06,
+ 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x22, 0x91,
+ 0x01, 0x0a, 0x08, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12, 0x35, 0x0a, 0x05, 0x6c,
+ 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e,
+ 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e,
+ 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52, 0x05, 0x6c, 0x61, 0x62,
+ 0x65, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65,
+ 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
+ 0x6d, 0x70, 0x22, 0xff, 0x02, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x35, 0x0a,
+ 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69,
+ 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69,
+ 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52, 0x05, 0x6c,
+ 0x61, 0x62, 0x65, 0x6c, 0x12, 0x31, 0x0a, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68,
+ 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x61, 0x75, 0x67, 0x65,
+ 0x52, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74,
+ 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72,
+ 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e,
+ 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72,
+ 0x12, 0x37, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75,
+ 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79,
+ 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x37, 0x0a, 0x07, 0x75, 0x6e, 0x74,
+ 0x79, 0x70, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e,
+ 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e,
+ 0x74, 0x2e, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x52, 0x07, 0x75, 0x6e, 0x74, 0x79, 0x70,
+ 0x65, 0x64, 0x12, 0x3d, 0x0a, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x18,
+ 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65,
+ 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x48, 0x69, 0x73,
+ 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x52, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61,
+ 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x6d,
+ 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
+ 0x6d, 0x70, 0x4d, 0x73, 0x22, 0xb6, 0x01, 0x0a, 0x0c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x46,
+ 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x65, 0x6c,
+ 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x65, 0x6c, 0x70, 0x12, 0x34, 0x0a,
+ 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x69, 0x6f,
+ 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65,
+ 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74,
+ 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, 0x04, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68,
+ 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69,
+ 0x63, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x6e, 0x69,
+ 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x2a, 0x62, 0x0a,
+ 0x0a, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x43,
+ 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41, 0x55, 0x47,
+ 0x45, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x4d, 0x4d, 0x41, 0x52, 0x59, 0x10, 0x02,
+ 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x54, 0x59, 0x50, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0d, 0x0a,
+ 0x09, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x04, 0x12, 0x13, 0x0a, 0x0f,
+ 0x47, 0x41, 0x55, 0x47, 0x45, 0x5f, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10,
+ 0x05, 0x42, 0x52, 0x0a, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65,
+ 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75,
+ 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73,
+ 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2f, 0x67, 0x6f,
+ 0x3b, 0x69, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x5f, 0x63,
+ 0x6c, 0x69, 0x65, 0x6e, 0x74,
+}
+
+var (
+ file_io_prometheus_client_metrics_proto_rawDescOnce sync.Once
+ file_io_prometheus_client_metrics_proto_rawDescData = file_io_prometheus_client_metrics_proto_rawDesc
+)
+
+func file_io_prometheus_client_metrics_proto_rawDescGZIP() []byte {
+ file_io_prometheus_client_metrics_proto_rawDescOnce.Do(func() {
+ file_io_prometheus_client_metrics_proto_rawDescData = protoimpl.X.CompressGZIP(file_io_prometheus_client_metrics_proto_rawDescData)
+ })
+ return file_io_prometheus_client_metrics_proto_rawDescData
+}
+
+var file_io_prometheus_client_metrics_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_io_prometheus_client_metrics_proto_msgTypes = make([]protoimpl.MessageInfo, 12)
+var file_io_prometheus_client_metrics_proto_goTypes = []interface{}{
+ (MetricType)(0), // 0: io.prometheus.client.MetricType
+ (*LabelPair)(nil), // 1: io.prometheus.client.LabelPair
+ (*Gauge)(nil), // 2: io.prometheus.client.Gauge
+ (*Counter)(nil), // 3: io.prometheus.client.Counter
+ (*Quantile)(nil), // 4: io.prometheus.client.Quantile
+ (*Summary)(nil), // 5: io.prometheus.client.Summary
+ (*Untyped)(nil), // 6: io.prometheus.client.Untyped
+ (*Histogram)(nil), // 7: io.prometheus.client.Histogram
+ (*Bucket)(nil), // 8: io.prometheus.client.Bucket
+ (*BucketSpan)(nil), // 9: io.prometheus.client.BucketSpan
+ (*Exemplar)(nil), // 10: io.prometheus.client.Exemplar
+ (*Metric)(nil), // 11: io.prometheus.client.Metric
+ (*MetricFamily)(nil), // 12: io.prometheus.client.MetricFamily
+ (*timestamppb.Timestamp)(nil), // 13: google.protobuf.Timestamp
+}
+var file_io_prometheus_client_metrics_proto_depIdxs = []int32{
+ 10, // 0: io.prometheus.client.Counter.exemplar:type_name -> io.prometheus.client.Exemplar
+ 13, // 1: io.prometheus.client.Counter.created_timestamp:type_name -> google.protobuf.Timestamp
+ 4, // 2: io.prometheus.client.Summary.quantile:type_name -> io.prometheus.client.Quantile
+ 13, // 3: io.prometheus.client.Summary.created_timestamp:type_name -> google.protobuf.Timestamp
+ 8, // 4: io.prometheus.client.Histogram.bucket:type_name -> io.prometheus.client.Bucket
+ 13, // 5: io.prometheus.client.Histogram.created_timestamp:type_name -> google.protobuf.Timestamp
+ 9, // 6: io.prometheus.client.Histogram.negative_span:type_name -> io.prometheus.client.BucketSpan
+ 9, // 7: io.prometheus.client.Histogram.positive_span:type_name -> io.prometheus.client.BucketSpan
+ 10, // 8: io.prometheus.client.Histogram.exemplars:type_name -> io.prometheus.client.Exemplar
+ 10, // 9: io.prometheus.client.Bucket.exemplar:type_name -> io.prometheus.client.Exemplar
+ 1, // 10: io.prometheus.client.Exemplar.label:type_name -> io.prometheus.client.LabelPair
+ 13, // 11: io.prometheus.client.Exemplar.timestamp:type_name -> google.protobuf.Timestamp
+ 1, // 12: io.prometheus.client.Metric.label:type_name -> io.prometheus.client.LabelPair
+ 2, // 13: io.prometheus.client.Metric.gauge:type_name -> io.prometheus.client.Gauge
+ 3, // 14: io.prometheus.client.Metric.counter:type_name -> io.prometheus.client.Counter
+ 5, // 15: io.prometheus.client.Metric.summary:type_name -> io.prometheus.client.Summary
+ 6, // 16: io.prometheus.client.Metric.untyped:type_name -> io.prometheus.client.Untyped
+ 7, // 17: io.prometheus.client.Metric.histogram:type_name -> io.prometheus.client.Histogram
+ 0, // 18: io.prometheus.client.MetricFamily.type:type_name -> io.prometheus.client.MetricType
+ 11, // 19: io.prometheus.client.MetricFamily.metric:type_name -> io.prometheus.client.Metric
+ 20, // [20:20] is the sub-list for method output_type
+ 20, // [20:20] is the sub-list for method input_type
+ 20, // [20:20] is the sub-list for extension type_name
+ 20, // [20:20] is the sub-list for extension extendee
+ 0, // [0:20] is the sub-list for field type_name
+}
+
+func init() { file_io_prometheus_client_metrics_proto_init() }
+func file_io_prometheus_client_metrics_proto_init() {
+ if File_io_prometheus_client_metrics_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_io_prometheus_client_metrics_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*LabelPair); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Gauge); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Counter); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Quantile); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Summary); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Untyped); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Histogram); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Bucket); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*BucketSpan); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Exemplar); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Metric); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MetricFamily); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_io_prometheus_client_metrics_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 12,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_io_prometheus_client_metrics_proto_goTypes,
+ DependencyIndexes: file_io_prometheus_client_metrics_proto_depIdxs,
+ EnumInfos: file_io_prometheus_client_metrics_proto_enumTypes,
+ MessageInfos: file_io_prometheus_client_metrics_proto_msgTypes,
+ }.Build()
+ File_io_prometheus_client_metrics_proto = out.File
+ file_io_prometheus_client_metrics_proto_rawDesc = nil
+ file_io_prometheus_client_metrics_proto_goTypes = nil
+ file_io_prometheus_client_metrics_proto_depIdxs = nil
}
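For orientation (not part of the vendored diff): the generated MetricFamily model above is usually framed on the wire as length-delimited protobuf via protodelim, which the decode.go change below adopts. A minimal round-trip sketch under that assumption; the metric family and values are illustrative:

```go
package main

import (
	"bufio"
	"bytes"
	"fmt"

	dto "github.com/prometheus/client_model/go"
	"google.golang.org/protobuf/encoding/protodelim"
	"google.golang.org/protobuf/proto"
)

func main() {
	in := &dto.MetricFamily{
		Name: proto.String("up"), // illustrative metric family
		Type: dto.MetricType_GAUGE.Enum(),
	}

	// Write one length-delimited message.
	var buf bytes.Buffer
	if _, err := protodelim.MarshalTo(&buf, in); err != nil {
		panic(err)
	}

	// UnmarshalFrom needs a protodelim.Reader (io.Reader plus io.ByteReader),
	// hence the bufio wrapper, mirroring the protoDecoder change below.
	var out dto.MetricFamily
	opts := protodelim.UnmarshalOptions{MaxSize: -1}
	if err := opts.UnmarshalFrom(bufio.NewReader(&buf), &out); err != nil {
		panic(err)
	}
	fmt.Println(out.GetName()) // up
}
```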
diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go
index 7657f841d63..25cfaa21643 100644
--- a/vendor/github.com/prometheus/common/expfmt/decode.go
+++ b/vendor/github.com/prometheus/common/expfmt/decode.go
@@ -14,6 +14,7 @@
package expfmt
import (
+ "bufio"
"fmt"
"io"
"math"
@@ -21,8 +22,8 @@ import (
"net/http"
dto "github.com/prometheus/client_model/go"
+ "google.golang.org/protobuf/encoding/protodelim"
- "github.com/matttproud/golang_protobuf_extensions/pbutil"
"github.com/prometheus/common/model"
)
@@ -44,7 +45,7 @@ func ResponseFormat(h http.Header) Format {
mediatype, params, err := mime.ParseMediaType(ct)
if err != nil {
- return FmtUnknown
+ return fmtUnknown
}
const textType = "text/plain"
@@ -52,42 +53,44 @@ func ResponseFormat(h http.Header) Format {
switch mediatype {
case ProtoType:
if p, ok := params["proto"]; ok && p != ProtoProtocol {
- return FmtUnknown
+ return fmtUnknown
}
if e, ok := params["encoding"]; ok && e != "delimited" {
- return FmtUnknown
+ return fmtUnknown
}
- return FmtProtoDelim
+ return fmtProtoDelim
case textType:
if v, ok := params["version"]; ok && v != TextVersion {
- return FmtUnknown
+ return fmtUnknown
}
- return FmtText
+ return fmtText
}
- return FmtUnknown
+ return fmtUnknown
}
// NewDecoder returns a new decoder based on the given input format.
// If the input format does not imply otherwise, a text format decoder is returned.
func NewDecoder(r io.Reader, format Format) Decoder {
- switch format {
- case FmtProtoDelim:
- return &protoDecoder{r: r}
+ switch format.FormatType() {
+ case TypeProtoDelim:
+ return &protoDecoder{r: bufio.NewReader(r)}
}
return &textDecoder{r: r}
}
// protoDecoder implements the Decoder interface for protocol buffers.
type protoDecoder struct {
- r io.Reader
+ r protodelim.Reader
}
// Decode implements the Decoder interface.
func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
- _, err := pbutil.ReadDelimited(d.r, v)
- if err != nil {
+ opts := protodelim.UnmarshalOptions{
+ MaxSize: -1,
+ }
+ if err := opts.UnmarshalFrom(d.r, v); err != nil {
return err
}
if !model.IsValidMetricName(model.LabelValue(v.GetName())) {
@@ -115,32 +118,31 @@ func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
// textDecoder implements the Decoder interface for the text protocol.
type textDecoder struct {
r io.Reader
- p TextParser
- fams []*dto.MetricFamily
+ fams map[string]*dto.MetricFamily
+ err error
}
// Decode implements the Decoder interface.
func (d *textDecoder) Decode(v *dto.MetricFamily) error {
- // TODO(fabxc): Wrap this as a line reader to make streaming safer.
- if len(d.fams) == 0 {
- // No cached metric families, read everything and parse metrics.
- fams, err := d.p.TextToMetricFamilies(d.r)
- if err != nil {
- return err
- }
- if len(fams) == 0 {
- return io.EOF
- }
- d.fams = make([]*dto.MetricFamily, 0, len(fams))
- for _, f := range fams {
- d.fams = append(d.fams, f)
+ if d.err == nil {
+ // Read all metrics in one shot.
+ var p TextParser
+ d.fams, d.err = p.TextToMetricFamilies(d.r)
+ // If we don't get an error, store io.EOF for the end.
+ if d.err == nil {
+ d.err = io.EOF
}
}
-
- *v = *d.fams[0]
- d.fams = d.fams[1:]
-
- return nil
+ // Pick off one MetricFamily per Decode until there's nothing left.
+ for key, fam := range d.fams {
+ v.Name = fam.Name
+ v.Help = fam.Help
+ v.Type = fam.Type
+ v.Metric = fam.Metric
+ delete(d.fams, key)
+ return nil
+ }
+ return d.err
}
// SampleDecoder wraps a Decoder to extract samples from the metric families
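The reworked textDecoder parses the whole input once, caches the families in a map, and hands back one family per Decode call until io.EOF. A minimal sketch of driving it through the public API; the sample exposition text is illustrative:

```go
package main

import (
	"fmt"
	"io"
	"strings"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

func main() {
	// A tiny text-format exposition; names and values are illustrative.
	in := strings.NewReader(`# TYPE requests_total counter
requests_total{path="/"} 42
`)
	dec := expfmt.NewDecoder(in, expfmt.NewFormat(expfmt.TypeTextPlain))
	for {
		var mf dto.MetricFamily
		if err := dec.Decode(&mf); err != nil {
			if err == io.EOF {
				break // all cached families consumed
			}
			panic(err)
		}
		fmt.Println(mf.GetName(), mf.GetType())
	}
}
```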
diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go
index 64dc0eb40c2..ff5ef7a9d92 100644
--- a/vendor/github.com/prometheus/common/expfmt/encode.go
+++ b/vendor/github.com/prometheus/common/expfmt/encode.go
@@ -18,9 +18,12 @@ import (
"io"
"net/http"
- "github.com/golang/protobuf/proto" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
- "github.com/matttproud/golang_protobuf_extensions/pbutil"
- "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg"
+ "google.golang.org/protobuf/encoding/protodelim"
+ "google.golang.org/protobuf/encoding/prototext"
+
+ "github.com/prometheus/common/model"
+
+ "github.com/munnerz/goautoneg"
dto "github.com/prometheus/client_model/go"
)
@@ -60,23 +63,32 @@ func (ec encoderCloser) Close() error {
// as the support is still experimental. To include the option to negotiate
// FmtOpenMetrics, use NegotiateIncludingOpenMetrics.
func Negotiate(h http.Header) Format {
+ escapingScheme := Format(fmt.Sprintf("; escaping=%s", Format(model.NameEscapingScheme.String())))
for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
+ if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" {
+ switch Format(escapeParam) {
+ case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues:
+ escapingScheme = Format(fmt.Sprintf("; escaping=%s", escapeParam))
+ default:
+ // If the escaping parameter is unknown, ignore it.
+ }
+ }
ver := ac.Params["version"]
if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
switch ac.Params["encoding"] {
case "delimited":
- return FmtProtoDelim
+ return fmtProtoDelim + escapingScheme
case "text":
- return FmtProtoText
+ return fmtProtoText + escapingScheme
case "compact-text":
- return FmtProtoCompact
+ return fmtProtoCompact + escapingScheme
}
}
if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
- return FmtText
+ return fmtText + escapingScheme
}
}
- return FmtText
+ return fmtText + escapingScheme
}
// NegotiateIncludingOpenMetrics works like Negotiate but includes
@@ -84,26 +96,40 @@ func Negotiate(h http.Header) Format {
// temporary and will disappear once FmtOpenMetrics is fully supported and as
// such may be negotiated by the normal Negotiate function.
func NegotiateIncludingOpenMetrics(h http.Header) Format {
+ escapingScheme := Format(fmt.Sprintf("; escaping=%s", Format(model.NameEscapingScheme.String())))
for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
+ if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" {
+ switch Format(escapeParam) {
+ case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues:
+ escapingScheme = Format(fmt.Sprintf("; escaping=%s", escapeParam))
+ default:
+ // If the escaping parameter is unknown, ignore it.
+ }
+ }
ver := ac.Params["version"]
if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
switch ac.Params["encoding"] {
case "delimited":
- return FmtProtoDelim
+ return fmtProtoDelim + escapingScheme
case "text":
- return FmtProtoText
+ return fmtProtoText + escapingScheme
case "compact-text":
- return FmtProtoCompact
+ return fmtProtoCompact + escapingScheme
}
}
if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
- return FmtText
+ return fmtText + escapingScheme
}
- if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion || ver == "") {
- return FmtOpenMetrics
+ if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion_0_0_1 || ver == OpenMetricsVersion_1_0_0 || ver == "") {
+ switch ver {
+ case OpenMetricsVersion_1_0_0:
+ return fmtOpenMetrics_1_0_0 + escapingScheme
+ default:
+ return fmtOpenMetrics_0_0_1 + escapingScheme
+ }
}
}
- return FmtText
+ return fmtText + escapingScheme
}
// NewEncoder returns a new encoder based on content type negotiation. All
@@ -112,44 +138,54 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format {
// for FmtOpenMetrics, but a future (breaking) release will add the Close method
// to the Encoder interface directly. The current version of the Encoder
// interface is kept for backwards compatibility.
-func NewEncoder(w io.Writer, format Format) Encoder {
- switch format {
- case FmtProtoDelim:
+// In cases where the Format does not allow for UTF-8 names, the global
+// NameEscapingScheme will be applied.
+//
+// NewEncoder can be called with additional options to customize the OpenMetrics text output.
+// For example:
+// NewEncoder(w, NewFormat(TypeOpenMetrics), WithCreatedLines())
+//
+// Extra options are ignored for all other formats.
+func NewEncoder(w io.Writer, format Format, options ...EncoderOption) Encoder {
+ escapingScheme := format.ToEscapingScheme()
+
+ switch format.FormatType() {
+ case TypeProtoDelim:
return encoderCloser{
encode: func(v *dto.MetricFamily) error {
- _, err := pbutil.WriteDelimited(w, v)
+ _, err := protodelim.MarshalTo(w, v)
return err
},
close: func() error { return nil },
}
- case FmtProtoCompact:
+ case TypeProtoCompact:
return encoderCloser{
encode: func(v *dto.MetricFamily) error {
- _, err := fmt.Fprintln(w, v.String())
+ _, err := fmt.Fprintln(w, model.EscapeMetricFamily(v, escapingScheme).String())
return err
},
close: func() error { return nil },
}
- case FmtProtoText:
+ case TypeProtoText:
return encoderCloser{
encode: func(v *dto.MetricFamily) error {
- _, err := fmt.Fprintln(w, proto.MarshalTextString(v))
+ _, err := fmt.Fprintln(w, prototext.Format(model.EscapeMetricFamily(v, escapingScheme)))
return err
},
close: func() error { return nil },
}
- case FmtText:
+ case TypeTextPlain:
return encoderCloser{
encode: func(v *dto.MetricFamily) error {
- _, err := MetricFamilyToText(w, v)
+ _, err := MetricFamilyToText(w, model.EscapeMetricFamily(v, escapingScheme))
return err
},
close: func() error { return nil },
}
- case FmtOpenMetrics:
+ case TypeOpenMetrics:
return encoderCloser{
encode: func(v *dto.MetricFamily) error {
- _, err := MetricFamilyToOpenMetrics(w, v)
+ _, err := MetricFamilyToOpenMetrics(w, model.EscapeMetricFamily(v, escapingScheme), options...)
return err
},
close: func() error {
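A sketch of the negotiation-plus-options flow introduced above. The Accept header and metric family are illustrative, and the type assertion assumes the exported expfmt.Closer interface so the OpenMetrics encoder can emit its trailing `# EOF`:

```go
package main

import (
	"net/http"
	"os"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
	"google.golang.org/protobuf/proto"
)

func main() {
	// Pretend a scraper asked for OpenMetrics 1.0.0; the header is illustrative.
	h := http.Header{}
	h.Set("Accept", "application/openmetrics-text; version=1.0.0")
	format := expfmt.NegotiateIncludingOpenMetrics(h)

	enc := expfmt.NewEncoder(os.Stdout, format, expfmt.WithCreatedLines())
	mf := &dto.MetricFamily{
		Name: proto.String("requests_total"),
		Type: dto.MetricType_COUNTER.Enum(),
		Metric: []*dto.Metric{{
			Counter: &dto.Counter{Value: proto.Float64(42)},
		}},
	}
	if err := enc.Encode(mf); err != nil {
		panic(err)
	}
	// OpenMetrics encoders need Close() to write the final `# EOF` line.
	if c, ok := enc.(expfmt.Closer); ok {
		c.Close()
	}
}
```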
diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go
index 0f176fa64f2..051b38cd178 100644
--- a/vendor/github.com/prometheus/common/expfmt/expfmt.go
+++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go
@@ -14,28 +14,164 @@
// Package expfmt contains tools for reading and writing Prometheus metrics.
package expfmt
+import (
+ "fmt"
+ "strings"
+
+ "github.com/prometheus/common/model"
+)
+
// Format specifies the HTTP content type of the different wire protocols.
type Format string
-// Constants to assemble the Content-Type values for the different wire protocols.
+// Constants to assemble the Content-Type values for the different wire
+// protocols. The Content-Type strings here are all for the legacy exposition
+// formats, where valid characters for metric names and label names are limited.
+// Support for arbitrary UTF-8 characters in those names is already partially
+// implemented in this module (see model.ValidationScheme), but to actually use
+// it on the wire, new content-type strings will have to be agreed upon and
+// added here.
const (
- TextVersion = "0.0.4"
- ProtoType = `application/vnd.google.protobuf`
- ProtoProtocol = `io.prometheus.client.MetricFamily`
- ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
- OpenMetricsType = `application/openmetrics-text`
- OpenMetricsVersion = "0.0.1"
-
- // The Content-Type values for the different wire protocols.
- FmtUnknown Format = ``
- FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8`
- FmtProtoDelim Format = ProtoFmt + ` encoding=delimited`
- FmtProtoText Format = ProtoFmt + ` encoding=text`
- FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
- FmtOpenMetrics Format = OpenMetricsType + `; version=` + OpenMetricsVersion + `; charset=utf-8`
+ TextVersion = "0.0.4"
+ ProtoType = `application/vnd.google.protobuf`
+ ProtoProtocol = `io.prometheus.client.MetricFamily`
+ protoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
+ OpenMetricsType = `application/openmetrics-text`
+ OpenMetricsVersion_0_0_1 = "0.0.1"
+ OpenMetricsVersion_1_0_0 = "1.0.0"
+
+ // The Content-Type values for the different wire protocols. Note that these
+ // values are now unexported. If code was relying on comparisons to these
+ // constants, instead use FormatType().
+ fmtUnknown Format = ``
+ fmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8`
+ fmtProtoDelim Format = protoFmt + ` encoding=delimited`
+ fmtProtoText Format = protoFmt + ` encoding=text`
+ fmtProtoCompact Format = protoFmt + ` encoding=compact-text`
+ fmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8`
+ fmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8`
)
const (
hdrContentType = "Content-Type"
hdrAccept = "Accept"
)
+
+// FormatType is a Go enum representing the overall category for the given
+// Format. As the number of Format permutations increases, basic string
+// comparisons are no longer feasible, so this enum captures the most useful
+// high-level attribute of the Format string.
+type FormatType int
+
+const (
+ TypeUnknown FormatType = iota
+ TypeProtoCompact
+ TypeProtoDelim
+ TypeProtoText
+ TypeTextPlain
+ TypeOpenMetrics
+)
+
+// NewFormat generates a new Format from the type provided. It is mostly used
+// for tests; most Formats should be generated as part of content negotiation in
+// encode.go. If a type has more than one version, the latest version will be
+// returned.
+func NewFormat(t FormatType) Format {
+ switch t {
+ case TypeProtoCompact:
+ return fmtProtoCompact
+ case TypeProtoDelim:
+ return fmtProtoDelim
+ case TypeProtoText:
+ return fmtProtoText
+ case TypeTextPlain:
+ return fmtText
+ case TypeOpenMetrics:
+ return fmtOpenMetrics_1_0_0
+ default:
+ return fmtUnknown
+ }
+}
+
+// NewOpenMetricsFormat generates a new OpenMetrics format matching the
+// specified version number.
+func NewOpenMetricsFormat(version string) (Format, error) {
+ if version == OpenMetricsVersion_0_0_1 {
+ return fmtOpenMetrics_0_0_1, nil
+ }
+ if version == OpenMetricsVersion_1_0_0 {
+ return fmtOpenMetrics_1_0_0, nil
+ }
+ return fmtUnknown, fmt.Errorf("unknown open metrics version string")
+}
+
+// FormatType deduces an overall FormatType for the given format.
+func (f Format) FormatType() FormatType {
+ toks := strings.Split(string(f), ";")
+ params := make(map[string]string)
+ for i, t := range toks {
+ if i == 0 {
+ continue
+ }
+ args := strings.Split(t, "=")
+ if len(args) != 2 {
+ continue
+ }
+ params[strings.TrimSpace(args[0])] = strings.TrimSpace(args[1])
+ }
+
+ switch strings.TrimSpace(toks[0]) {
+ case ProtoType:
+ if params["proto"] != ProtoProtocol {
+ return TypeUnknown
+ }
+ switch params["encoding"] {
+ case "delimited":
+ return TypeProtoDelim
+ case "text":
+ return TypeProtoText
+ case "compact-text":
+ return TypeProtoCompact
+ default:
+ return TypeUnknown
+ }
+ case OpenMetricsType:
+ if params["charset"] != "utf-8" {
+ return TypeUnknown
+ }
+ return TypeOpenMetrics
+ case "text/plain":
+ v, ok := params["version"]
+ if !ok {
+ return TypeTextPlain
+ }
+ if v == TextVersion {
+ return TypeTextPlain
+ }
+ return TypeUnknown
+ default:
+ return TypeUnknown
+ }
+}
+
+// ToEscapingScheme returns an EscapingScheme depending on the Format. Iff the
+// Format contains an escaping=allow-utf-8 term, it will select NoEscaping. If a valid
+// "escaping" term exists, that will be used. Otherwise, the global default will
+// be returned.
+func (format Format) ToEscapingScheme() model.EscapingScheme {
+ for _, p := range strings.Split(string(format), ";") {
+ toks := strings.Split(p, "=")
+ if len(toks) != 2 {
+ continue
+ }
+ key, value := strings.TrimSpace(toks[0]), strings.TrimSpace(toks[1])
+ if key == model.EscapingKey {
+ scheme, err := model.ToEscapingScheme(value)
+ if err != nil {
+ return model.NameEscapingScheme
+ }
+ return scheme
+ }
+ }
+ return model.NameEscapingScheme
+}
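A small sketch of the new Format inspection helpers; the content-type string is hypothetical but follows the parameter scheme defined above:

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/expfmt"
	"github.com/prometheus/common/model"
)

func main() {
	// Hypothetical negotiated content type with an escaping parameter.
	f := expfmt.Format("application/openmetrics-text; version=1.0.0; charset=utf-8; escaping=underscores")
	fmt.Println(f.FormatType() == expfmt.TypeOpenMetrics)         // true
	fmt.Println(f.ToEscapingScheme() == model.UnderscoreEscaping) // true

	// With no escaping parameter, the global default applies.
	plain := expfmt.NewFormat(expfmt.TypeTextPlain)
	fmt.Println(plain.ToEscapingScheme() == model.NameEscapingScheme) // true
}
```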
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go
index dc2eedeefca..dfac962a4e7 100644
--- a/vendor/github.com/prometheus/common/expfmt/fuzz.go
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz.go
@@ -12,6 +12,7 @@
// limitations under the License.
// Build only when actually fuzzing
+//go:build gofuzz
// +build gofuzz
package expfmt
@@ -20,8 +21,8 @@ import "bytes"
// Fuzz text metric parser with github.com/dvyukov/go-fuzz:
//
-// go-fuzz-build github.com/prometheus/common/expfmt
-// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz
+// go-fuzz-build github.com/prometheus/common/expfmt
+// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz
//
// Further input samples should go in the folder fuzz/corpus.
func Fuzz(in []byte) int {
diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
index 8a9313a3bee..353c5e93f92 100644
--- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
+++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
@@ -22,12 +22,47 @@ import (
"strconv"
"strings"
- "github.com/golang/protobuf/ptypes"
+ "google.golang.org/protobuf/types/known/timestamppb"
+
"github.com/prometheus/common/model"
dto "github.com/prometheus/client_model/go"
)
+type encoderOption struct {
+ withCreatedLines bool
+ withUnit bool
+}
+
+type EncoderOption func(*encoderOption)
+
+// WithCreatedLines is an EncoderOption that configures the OpenMetrics encoder
+// to include _created lines (See
+// https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#counter-1).
+// Created timestamps can improve the accuracy of series reset detection, but
+// come with a bandwidth cost.
+//
+// At the time of writing, created timestamp ingestion is still experimental in
+// Prometheus and needs to be enabled with the feature-flag
+// `--feature-flag=created-timestamp-zero-ingestion`, and breaking changes are
+// still possible. Therefore, it is recommended to use this feature with caution.
+func WithCreatedLines() EncoderOption {
+ return func(t *encoderOption) {
+ t.withCreatedLines = true
+ }
+}
+
+// WithUnit is an EncoderOption enabling a set unit to be written to the output
+// and to be added to the metric name, if it's not there already, as a suffix.
+// Without opting in this way, the unit is not added to the metric name and,
+// furthermore, is not passed on to the output, even if it is declared in the
+// *dto.MetricFamily struct, i.e. even if in.Unit != nil.
+func WithUnit() EncoderOption {
+ return func(t *encoderOption) {
+ t.withUnit = true
+ }
+}
+
// MetricFamilyToOpenMetrics converts a MetricFamily proto message into the
// OpenMetrics text format and writes the resulting lines to 'out'. It returns
// the number of bytes written and any error encountered. The output will have
@@ -36,6 +71,18 @@ import (
// sanity checks. If the input contains duplicate metrics or invalid metric or
// label names, the conversion will result in invalid text format output.
//
+// If metric names conform to the legacy validation pattern, they will be placed
+// outside the brackets in the traditional way, like `foo{}`. If the metric name
+// fails the legacy validation check, it will be placed quoted inside the
+// brackets: `{"foo"}`. As stated above, the input is assumed to be santized and
+// no error will be thrown in this case.
+//
+// Similar to metric names, if label names conform to the legacy validation
+// pattern, they will be unquoted as normal, like `foo{bar="baz"}`. If the label
+// name fails the legacy validation check, it will be quoted:
+// `foo{"bar"="baz"}`. As stated above, the input is assumed to be santized and
+// no error will be thrown in this case.
+//
// This function fulfills the type 'expfmt.encoder'.
//
// Note that OpenMetrics requires a final `# EOF` line. Since this function acts
@@ -47,21 +94,35 @@ import (
// missing features and peculiarities to avoid complications when switching from
// Prometheus to OpenMetrics or vice versa:
//
-// - Counters are expected to have the `_total` suffix in their metric name. In
-// the output, the suffix will be truncated from the `# TYPE` and `# HELP`
-// line. A counter with a missing `_total` suffix is not an error. However,
-// its type will be set to `unknown` in that case to avoid invalid OpenMetrics
-// output.
+// - Counters are expected to have the `_total` suffix in their metric name. In
+// the output, the suffix will be truncated from the `# TYPE`, `# HELP` and `# UNIT`
+// lines. A counter with a missing `_total` suffix is not an error. However,
+// its type will be set to `unknown` in that case to avoid invalid OpenMetrics
+// output.
//
-// - No support for the following (optional) features: `# UNIT` line, `_created`
-// line, info type, stateset type, gaugehistogram type.
+// - According to the OM specs, the `# UNIT` line is optional, but if populated,
+// the unit has to be present in the metric name as its suffix:
+// (see https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#unit).
+// However, to accommodate scenarios where such a change in the metric name is
+// not desirable, users must explicitly opt in via WithUnit (described above)
+// if they want the unit to be included in the output AND in the metric name
+// as a suffix; without opting in, neither happens.
//
-// - The size of exemplar labels is not checked (i.e. it's possible to create
-// exemplars that are larger than allowed by the OpenMetrics specification).
+// - No support for the following (optional) features: info type,
+// stateset type, gaugehistogram type.
//
-// - The value of Counters is not checked. (OpenMetrics doesn't allow counters
-// with a `NaN` value.)
-func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int, err error) {
+// - The size of exemplar labels is not checked (i.e. it's possible to create
+// exemplars that are larger than allowed by the OpenMetrics specification).
+//
+// - The value of Counters is not checked. (OpenMetrics doesn't allow counters
+// with a `NaN` value.)
+func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...EncoderOption) (written int, err error) {
+ toOM := encoderOption{}
+ for _, option := range options {
+ option(&toOM)
+ }
+
name := in.GetName()
if name == "" {
return 0, fmt.Errorf("MetricFamily has no name: %s", in)
@@ -84,12 +145,15 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
}
var (
- n int
- metricType = in.GetType()
- shortName = name
+ n int
+ metricType = in.GetType()
+ compliantName = name
)
- if metricType == dto.MetricType_COUNTER && strings.HasSuffix(shortName, "_total") {
- shortName = name[:len(name)-6]
+ if metricType == dto.MetricType_COUNTER && strings.HasSuffix(compliantName, "_total") {
+ compliantName = name[:len(name)-6]
+ }
+ if toOM.withUnit && in.Unit != nil && !strings.HasSuffix(compliantName, fmt.Sprintf("_%s", *in.Unit)) {
+ compliantName = compliantName + fmt.Sprintf("_%s", *in.Unit)
}
// Comments, first HELP, then TYPE.
@@ -99,7 +163,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
if err != nil {
return
}
- n, err = w.WriteString(shortName)
+ n, err = writeName(w, compliantName)
written += n
if err != nil {
return
@@ -125,7 +189,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
if err != nil {
return
}
- n, err = w.WriteString(shortName)
+ n, err = writeName(w, compliantName)
written += n
if err != nil {
return
@@ -152,55 +216,89 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
if err != nil {
return
}
+ if toOM.withUnit && in.Unit != nil {
+ n, err = w.WriteString("# UNIT ")
+ written += n
+ if err != nil {
+ return
+ }
+ n, err = writeName(w, compliantName)
+ written += n
+ if err != nil {
+ return
+ }
+
+ err = w.WriteByte(' ')
+ written++
+ if err != nil {
+ return
+ }
+ n, err = writeEscapedString(w, *in.Unit, true)
+ written += n
+ if err != nil {
+ return
+ }
+ err = w.WriteByte('\n')
+ written++
+ if err != nil {
+ return
+ }
+ }
+
+ var createdTsBytesWritten int
// Finally the samples, one line for each.
+ if metricType == dto.MetricType_COUNTER && strings.HasSuffix(name, "_total") {
+ compliantName = compliantName + "_total"
+ }
for _, metric := range in.Metric {
switch metricType {
case dto.MetricType_COUNTER:
if metric.Counter == nil {
return written, fmt.Errorf(
- "expected counter in metric %s %s", name, metric,
+ "expected counter in metric %s %s", compliantName, metric,
)
}
- // Note that we have ensured above that either the name
- // ends on `_total` or that the rendered type is
- // `unknown`. Therefore, no `_total` must be added here.
n, err = writeOpenMetricsSample(
- w, name, "", metric, "", 0,
+ w, compliantName, "", metric, "", 0,
metric.Counter.GetValue(), 0, false,
metric.Counter.Exemplar,
)
+ if toOM.withCreatedLines && metric.Counter.CreatedTimestamp != nil {
+ createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "_total", metric, "", 0, metric.Counter.GetCreatedTimestamp())
+ n += createdTsBytesWritten
+ }
case dto.MetricType_GAUGE:
if metric.Gauge == nil {
return written, fmt.Errorf(
- "expected gauge in metric %s %s", name, metric,
+ "expected gauge in metric %s %s", compliantName, metric,
)
}
n, err = writeOpenMetricsSample(
- w, name, "", metric, "", 0,
+ w, compliantName, "", metric, "", 0,
metric.Gauge.GetValue(), 0, false,
nil,
)
case dto.MetricType_UNTYPED:
if metric.Untyped == nil {
return written, fmt.Errorf(
- "expected untyped in metric %s %s", name, metric,
+ "expected untyped in metric %s %s", compliantName, metric,
)
}
n, err = writeOpenMetricsSample(
- w, name, "", metric, "", 0,
+ w, compliantName, "", metric, "", 0,
metric.Untyped.GetValue(), 0, false,
nil,
)
case dto.MetricType_SUMMARY:
if metric.Summary == nil {
return written, fmt.Errorf(
- "expected summary in metric %s %s", name, metric,
+ "expected summary in metric %s %s", compliantName, metric,
)
}
for _, q := range metric.Summary.Quantile {
n, err = writeOpenMetricsSample(
- w, name, "", metric,
+ w, compliantName, "", metric,
model.QuantileLabel, q.GetQuantile(),
q.GetValue(), 0, false,
nil,
@@ -211,7 +309,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
}
}
n, err = writeOpenMetricsSample(
- w, name, "_sum", metric, "", 0,
+ w, compliantName, "_sum", metric, "", 0,
metric.Summary.GetSampleSum(), 0, false,
nil,
)
@@ -220,20 +318,24 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
return
}
n, err = writeOpenMetricsSample(
- w, name, "_count", metric, "", 0,
+ w, compliantName, "_count", metric, "", 0,
0, metric.Summary.GetSampleCount(), true,
nil,
)
+ if toOM.withCreatedLines && metric.Summary.CreatedTimestamp != nil {
+ createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "", metric, "", 0, metric.Summary.GetCreatedTimestamp())
+ n += createdTsBytesWritten
+ }
case dto.MetricType_HISTOGRAM:
if metric.Histogram == nil {
return written, fmt.Errorf(
- "expected histogram in metric %s %s", name, metric,
+ "expected histogram in metric %s %s", compliantName, metric,
)
}
infSeen := false
for _, b := range metric.Histogram.Bucket {
n, err = writeOpenMetricsSample(
- w, name, "_bucket", metric,
+ w, compliantName, "_bucket", metric,
model.BucketLabel, b.GetUpperBound(),
0, b.GetCumulativeCount(), true,
b.Exemplar,
@@ -248,7 +350,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
}
if !infSeen {
n, err = writeOpenMetricsSample(
- w, name, "_bucket", metric,
+ w, compliantName, "_bucket", metric,
model.BucketLabel, math.Inf(+1),
0, metric.Histogram.GetSampleCount(), true,
nil,
@@ -259,7 +361,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
}
}
n, err = writeOpenMetricsSample(
- w, name, "_sum", metric, "", 0,
+ w, compliantName, "_sum", metric, "", 0,
metric.Histogram.GetSampleSum(), 0, false,
nil,
)
@@ -268,13 +370,17 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int
return
}
n, err = writeOpenMetricsSample(
- w, name, "_count", metric, "", 0,
+ w, compliantName, "_count", metric, "", 0,
0, metric.Histogram.GetSampleCount(), true,
nil,
)
+ if toOM.withCreatedLines && metric.Histogram.CreatedTimestamp != nil {
+ createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "", metric, "", 0, metric.Histogram.GetCreatedTimestamp())
+ n += createdTsBytesWritten
+ }
default:
return written, fmt.Errorf(
- "unexpected type in metric %s %s", name, metric,
+ "unexpected type in metric %s %s", compliantName, metric,
)
}
written += n
@@ -304,21 +410,9 @@ func writeOpenMetricsSample(
floatValue float64, intValue uint64, useIntValue bool,
exemplar *dto.Exemplar,
) (int, error) {
- var written int
- n, err := w.WriteString(name)
- written += n
- if err != nil {
- return written, err
- }
- if suffix != "" {
- n, err = w.WriteString(suffix)
- written += n
- if err != nil {
- return written, err
- }
- }
- n, err = writeOpenMetricsLabelPairs(
- w, metric.Label, additionalLabelName, additionalLabelValue,
+ written := 0
+ n, err := writeOpenMetricsNameAndLabelPairs(
+ w, name+suffix, metric.Label, additionalLabelName, additionalLabelValue,
)
written += n
if err != nil {
@@ -351,7 +445,7 @@ func writeOpenMetricsSample(
return written, err
}
}
- if exemplar != nil {
+ if exemplar != nil && len(exemplar.Label) > 0 {
n, err = writeExemplar(w, exemplar)
written += n
if err != nil {
@@ -366,27 +460,58 @@ func writeOpenMetricsSample(
return written, nil
}
-// writeOpenMetricsLabelPairs works like writeOpenMetrics but formats the float
-// in OpenMetrics style.
-func writeOpenMetricsLabelPairs(
+// writeOpenMetricsNameAndLabelPairs works like writeOpenMetricsSample but
+// formats the float in OpenMetrics style.
+func writeOpenMetricsNameAndLabelPairs(
w enhancedWriter,
+ name string,
in []*dto.LabelPair,
additionalLabelName string, additionalLabelValue float64,
) (int, error) {
- if len(in) == 0 && additionalLabelName == "" {
- return 0, nil
- }
var (
- written int
- separator byte = '{'
+ written int
+ separator byte = '{'
+ metricInsideBraces = false
)
+
+ if name != "" {
+ // If the name does not pass the legacy validity check, we must put the
+ // metric name inside the braces, quoted.
+ if !model.IsValidLegacyMetricName(model.LabelValue(name)) {
+ metricInsideBraces = true
+ err := w.WriteByte(separator)
+ written++
+ if err != nil {
+ return written, err
+ }
+ separator = ','
+ }
+
+ n, err := writeName(w, name)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+
+ if len(in) == 0 && additionalLabelName == "" {
+ if metricInsideBraces {
+ err := w.WriteByte('}')
+ written++
+ if err != nil {
+ return written, err
+ }
+ }
+ return written, nil
+ }
+
for _, lp := range in {
err := w.WriteByte(separator)
written++
if err != nil {
return written, err
}
- n, err := w.WriteString(lp.GetName())
+ n, err := writeName(w, lp.GetName())
written += n
if err != nil {
return written, err
@@ -443,6 +568,49 @@ func writeOpenMetricsLabelPairs(
return written, nil
}
+// writeOpenMetricsCreated writes the created timestamp for a single time series
+// following OpenMetrics text format to w, given the metric name, the metric proto
+// message itself, optionally a suffix to be removed, e.g. '_total' for counters,
+// an additional label name with a float64 value (use empty string as label name if
+// not required) and the timestamp that represents the created timestamp.
+// The function returns the number of bytes written and any error encountered.
+func writeOpenMetricsCreated(w enhancedWriter,
+ name, suffixToTrim string, metric *dto.Metric,
+ additionalLabelName string, additionalLabelValue float64,
+ createdTimestamp *timestamppb.Timestamp,
+) (int, error) {
+ written := 0
+ n, err := writeOpenMetricsNameAndLabelPairs(
+ w, strings.TrimSuffix(name, suffixToTrim)+"_created", metric.Label, additionalLabelName, additionalLabelValue,
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+
+ err = w.WriteByte(' ')
+ written++
+ if err != nil {
+ return written, err
+ }
+
+ // TODO(beorn7): Format this directly from components of ts to
+ // avoid overflow/underflow and precision issues of the float
+ // conversion.
+ n, err = writeOpenMetricsFloat(w, float64(createdTimestamp.AsTime().UnixNano())/1e9)
+ written += n
+ if err != nil {
+ return written, err
+ }
+
+ err = w.WriteByte('\n')
+ written++
+ if err != nil {
+ return written, err
+ }
+ return written, nil
+}
+
// writeExemplar writes the provided exemplar in OpenMetrics format to w. The
// function returns the number of bytes written and any error encountered.
func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) {
@@ -452,7 +620,7 @@ func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) {
if err != nil {
return written, err
}
- n, err = writeOpenMetricsLabelPairs(w, e.Label, "", 0)
+ n, err = writeOpenMetricsNameAndLabelPairs(w, "", e.Label, "", 0)
written += n
if err != nil {
return written, err
@@ -473,10 +641,11 @@ func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) {
if err != nil {
return written, err
}
- ts, err := ptypes.Timestamp((*e).Timestamp)
+ err = (*e).Timestamp.CheckValid()
if err != nil {
return written, err
}
+ ts := (*e).Timestamp.AsTime()
// TODO(beorn7): Format this directly from components of ts to
// avoid overflow/underflow and precision issues of the float
// conversion.
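A sketch exercising both new encoder options together, assuming a counter with a created timestamp; the family is illustrative. Note that with WithUnit the unit becomes (or must already be) a suffix of the name, so a `bytes` unit pairs with a `_bytes_total` counter:

```go
package main

import (
	"os"
	"time"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	mf := &dto.MetricFamily{
		Name: proto.String("request_bytes_total"), // illustrative
		Help: proto.String("Bytes received."),
		Type: dto.MetricType_COUNTER.Enum(),
		Unit: proto.String("bytes"),
		Metric: []*dto.Metric{{
			Counter: &dto.Counter{
				Value:            proto.Float64(42),
				CreatedTimestamp: timestamppb.New(time.Unix(1700000000, 0)), // illustrative
			},
		}},
	}
	// Emits # HELP/# TYPE/# UNIT lines, the _total sample, and a
	// request_bytes_created line.
	if _, err := expfmt.MetricFamilyToOpenMetrics(os.Stdout, mf,
		expfmt.WithCreatedLines(), expfmt.WithUnit()); err != nil {
		panic(err)
	}
}
```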
diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go
index 5ba503b0654..f9b8265a9ec 100644
--- a/vendor/github.com/prometheus/common/expfmt/text_create.go
+++ b/vendor/github.com/prometheus/common/expfmt/text_create.go
@@ -17,7 +17,6 @@ import (
"bufio"
"fmt"
"io"
- "io/ioutil"
"math"
"strconv"
"strings"
@@ -44,7 +43,7 @@ const (
var (
bufPool = sync.Pool{
New: func() interface{} {
- return bufio.NewWriter(ioutil.Discard)
+ return bufio.NewWriter(io.Discard)
},
}
numBufPool = sync.Pool{
@@ -63,6 +62,18 @@ var (
// contains duplicate metrics or invalid metric or label names, the conversion
// will result in invalid text format output.
//
+// If metric names conform to the legacy validation pattern, they will be placed
+// outside the brackets in the traditional way, like `foo{}`. If the metric name
+// fails the legacy validation check, it will be placed quoted inside the
+// brackets: `{"foo"}`. As stated above, the input is assumed to be santized and
+// no error will be thrown in this case.
+//
+// Similar to metric names, if label names conform to the legacy validation
+// pattern, they will be unquoted as normal, like `foo{bar="baz"}`. If the label
+// name fails the legacy validation check, it will be quoted:
+// `foo{"bar"="baz"}`. As stated above, the input is assumed to be santized and
+// no error will be thrown in this case.
+//
// This method fulfills the type 'prometheus.encoder'.
func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err error) {
// Fail-fast checks.
@@ -99,7 +110,7 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err e
if err != nil {
return
}
- n, err = w.WriteString(name)
+ n, err = writeName(w, name)
written += n
if err != nil {
return
@@ -125,7 +136,7 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err e
if err != nil {
return
}
- n, err = w.WriteString(name)
+ n, err = writeName(w, name)
written += n
if err != nil {
return
@@ -281,21 +292,9 @@ func writeSample(
additionalLabelName string, additionalLabelValue float64,
value float64,
) (int, error) {
- var written int
- n, err := w.WriteString(name)
- written += n
- if err != nil {
- return written, err
- }
- if suffix != "" {
- n, err = w.WriteString(suffix)
- written += n
- if err != nil {
- return written, err
- }
- }
- n, err = writeLabelPairs(
- w, metric.Label, additionalLabelName, additionalLabelValue,
+ written := 0
+ n, err := writeNameAndLabelPairs(
+ w, name+suffix, metric.Label, additionalLabelName, additionalLabelValue,
)
written += n
if err != nil {
@@ -331,32 +330,64 @@ func writeSample(
return written, nil
}
-// writeLabelPairs converts a slice of LabelPair proto messages plus the
-// explicitly given additional label pair into text formatted as required by the
-// text format and writes it to 'w'. An empty slice in combination with an empty
-// string 'additionalLabelName' results in nothing being written. Otherwise, the
-// label pairs are written, escaped as required by the text format, and enclosed
-// in '{...}'. The function returns the number of bytes written and any error
-// encountered.
-func writeLabelPairs(
+// writeNameAndLabelPairs converts a slice of LabelPair proto messages plus the
+// explicitly given metric name and additional label pair into text formatted as
+// required by the text format and writes it to 'w'. An empty slice in
+// combination with an empty string 'additionalLabelName' results in nothing
+// being written. Otherwise, the label pairs are written, escaped as required by
+// the text format, and enclosed in '{...}'. The function returns the number of
+// bytes written and any error encountered. If the metric name is not
+// legacy-valid, it will be put inside the brackets as well. Legacy-invalid
+// label names will also be quoted.
+func writeNameAndLabelPairs(
w enhancedWriter,
+ name string,
in []*dto.LabelPair,
additionalLabelName string, additionalLabelValue float64,
) (int, error) {
- if len(in) == 0 && additionalLabelName == "" {
- return 0, nil
- }
var (
- written int
- separator byte = '{'
+ written int
+ separator byte = '{'
+ metricInsideBraces = false
)
+
+ if name != "" {
+ // If the name does not pass the legacy validity check, we must put the
+ // metric name inside the braces.
+ if !model.IsValidLegacyMetricName(model.LabelValue(name)) {
+ metricInsideBraces = true
+ err := w.WriteByte(separator)
+ written++
+ if err != nil {
+ return written, err
+ }
+ separator = ','
+ }
+ n, err := writeName(w, name)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+
+ if len(in) == 0 && additionalLabelName == "" {
+ if metricInsideBraces {
+ err := w.WriteByte('}')
+ written++
+ if err != nil {
+ return written, err
+ }
+ }
+ return written, nil
+ }
+
for _, lp := range in {
err := w.WriteByte(separator)
written++
if err != nil {
return written, err
}
- n, err := w.WriteString(lp.GetName())
+ n, err := writeName(w, lp.GetName())
written += n
if err != nil {
return written, err
@@ -463,3 +494,27 @@ func writeInt(w enhancedWriter, i int64) (int, error) {
numBufPool.Put(bp)
return written, err
}
+
+// writeName writes a string as-is if it complies with the legacy naming
+// scheme, or escapes it in double quotes if not.
+func writeName(w enhancedWriter, name string) (int, error) {
+ if model.IsValidLegacyMetricName(model.LabelValue(name)) {
+ return w.WriteString(name)
+ }
+ var written int
+ var err error
+ err = w.WriteByte('"')
+ written++
+ if err != nil {
+ return written, err
+ }
+ var n int
+ n, err = writeEscapedString(w, name, true)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ err = w.WriteByte('"')
+ written++
+ return written, err
+}
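A sketch of the quoting behavior described in the doc comments above: a legacy-invalid metric name should render quoted inside the braces, roughly as `{"my.metric",host="a"} 1` (exact rendering not verified here):

```go
package main

import (
	"os"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
	"google.golang.org/protobuf/proto"
)

func main() {
	// "my.metric" fails the legacy validity check because of the dot.
	mf := &dto.MetricFamily{
		Name: proto.String("my.metric"),
		Type: dto.MetricType_UNTYPED.Enum(),
		Metric: []*dto.Metric{{
			Label: []*dto.LabelPair{{
				Name:  proto.String("host"),
				Value: proto.String("a"),
			}},
			Untyped: &dto.Untyped{Value: proto.Float64(1)},
		}},
	}
	expfmt.MetricFamilyToText(os.Stdout, mf)
}
```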
diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go
index 84be0643ec6..26490211af2 100644
--- a/vendor/github.com/prometheus/common/expfmt/text_parse.go
+++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go
@@ -16,6 +16,7 @@ package expfmt
import (
"bufio"
"bytes"
+ "errors"
"fmt"
"io"
"math"
@@ -24,7 +25,8 @@ import (
dto "github.com/prometheus/client_model/go"
- "github.com/golang/protobuf/proto" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
+ "google.golang.org/protobuf/proto"
+
"github.com/prometheus/common/model"
)
@@ -112,7 +114,7 @@ func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricF
// stream. Turn this error into something nicer and more
// meaningful. (io.EOF is often used as a signal for the legitimate end
// of an input stream.)
- if p.err == io.EOF {
+ if p.err != nil && errors.Is(p.err, io.EOF) {
p.parseError("unexpected end of input stream")
}
return p.metricFamiliesByName, p.err
@@ -142,9 +144,13 @@ func (p *TextParser) reset(in io.Reader) {
func (p *TextParser) startOfLine() stateFn {
p.lineCount++
if p.skipBlankTab(); p.err != nil {
- // End of input reached. This is the only case where
- // that is not an error but a signal that we are done.
- p.err = nil
+ // This is the only place that we expect to see io.EOF,
+ // which is not an error but the signal that we are done.
+ // Any other error that happens to align with the start of
+ // a line is still an error.
+ if errors.Is(p.err, io.EOF) {
+ p.err = nil
+ }
return nil
}
switch p.currentByte {
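A minimal parser sketch; note the trailing newline, since with the stricter EOF handling above an end of input that is not aligned with a line start is still reported as "unexpected end of input stream":

```go
package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/common/expfmt"
)

func main() {
	var p expfmt.TextParser
	// Illustrative input; the trailing newline matters.
	fams, err := p.TextToMetricFamilies(strings.NewReader("up 1\n"))
	if err != nil {
		panic(err)
	}
	for name, mf := range fams {
		fmt.Println(name, len(mf.Metric))
	}
}
```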
diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go
index 35e739c7ad2..80d1fe944ea 100644
--- a/vendor/github.com/prometheus/common/model/alert.go
+++ b/vendor/github.com/prometheus/common/model/alert.go
@@ -75,7 +75,12 @@ func (a *Alert) ResolvedAt(ts time.Time) bool {
// Status returns the status of the alert.
func (a *Alert) Status() AlertStatus {
- if a.Resolved() {
+ return a.StatusAt(time.Now())
+}
+
+// StatusAt returns the status of the alert at the given timestamp.
+func (a *Alert) StatusAt(ts time.Time) AlertStatus {
+ if a.ResolvedAt(ts) {
return AlertResolved
}
return AlertFiring
@@ -90,13 +95,13 @@ func (a *Alert) Validate() error {
return fmt.Errorf("start time must be before end time")
}
if err := a.Labels.Validate(); err != nil {
- return fmt.Errorf("invalid label set: %s", err)
+ return fmt.Errorf("invalid label set: %w", err)
}
if len(a.Labels) == 0 {
return fmt.Errorf("at least one label pair required")
}
if err := a.Annotations.Validate(); err != nil {
- return fmt.Errorf("invalid annotations: %s", err)
+ return fmt.Errorf("invalid annotations: %w", err)
}
return nil
}
@@ -127,6 +132,17 @@ func (as Alerts) HasFiring() bool {
return false
}
+// HasFiringAt returns true iff one of the alerts is not resolved
+// at the time ts.
+func (as Alerts) HasFiringAt(ts time.Time) bool {
+ for _, a := range as {
+ if !a.ResolvedAt(ts) {
+ return true
+ }
+ }
+ return false
+}
+
// Status returns StatusFiring iff at least one of the alerts is firing.
func (as Alerts) Status() AlertStatus {
if as.HasFiring() {
@@ -134,3 +150,12 @@ func (as Alerts) Status() AlertStatus {
}
return AlertResolved
}
+
+// StatusAt returns StatusFiring iff at least one of the alerts is firing
+// at the time ts.
+func (as Alerts) StatusAt(ts time.Time) AlertStatus {
+ if as.HasFiringAt(ts) {
+ return AlertFiring
+ }
+ return AlertResolved
+}
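A small sketch of the new timestamp-parameterized status helpers; the alert is illustrative:

```go
package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	now := time.Now()
	a := &model.Alert{
		Labels:   model.LabelSet{"alertname": "DiskFull"}, // illustrative
		StartsAt: now.Add(-time.Hour),
		EndsAt:   now.Add(-time.Minute), // resolved a minute ago
	}
	fmt.Println(a.StatusAt(now))                       // resolved
	fmt.Println(a.StatusAt(now.Add(-2 * time.Minute))) // firing: EndsAt is still in the future at that point
	fmt.Println(model.Alerts{a}.StatusAt(now))         // resolved
}
```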
diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go
index ef895633546..3317ce22ff7 100644
--- a/vendor/github.com/prometheus/common/model/labels.go
+++ b/vendor/github.com/prometheus/common/model/labels.go
@@ -97,17 +97,25 @@ var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
// therewith.
type LabelName string
-// IsValid is true iff the label name matches the pattern of LabelNameRE. This
-// method, however, does not use LabelNameRE for the check but a much faster
-// hardcoded implementation.
+// IsValid returns true iff the name matches the pattern of LabelNameRE when
+// NameValidationScheme is LegacyValidation, or iff it is valid UTF-8 when
+// NameValidationScheme is UTF8Validation. The legacy check does not use
+// LabelNameRE itself but a much faster hardcoded implementation.
func (ln LabelName) IsValid() bool {
if len(ln) == 0 {
return false
}
- for i, b := range ln {
- if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
- return false
+ switch NameValidationScheme {
+ case LegacyValidation:
+ for i, b := range ln {
+ if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
+ return false
+ }
}
+ case UTF8Validation:
+ return utf8.ValidString(string(ln))
+ default:
+ panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme))
}
return true
}
@@ -164,7 +172,7 @@ func (l LabelNames) String() string {
// A LabelValue is an associated value for a LabelName.
type LabelValue string
-// IsValid returns true iff the string is a valid UTF8.
+// IsValid returns true iff the string is valid UTF-8.
func (lv LabelValue) IsValid() bool {
return utf8.ValidString(string(lv))
}
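A sketch of how the package-level validation switch changes LabelName.IsValid; per the NameValidationScheme comment in metric.go below, real binaries should set it once at startup rather than toggling it:

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	model.NameValidationScheme = model.LegacyValidation
	fmt.Println(model.LabelName("http.method").IsValid()) // false: '.' is not legacy-valid

	model.NameValidationScheme = model.UTF8Validation
	fmt.Println(model.LabelName("http.method").IsValid()) // true: any valid UTF-8
}
```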
diff --git a/vendor/github.com/prometheus/common/model/labelset.go b/vendor/github.com/prometheus/common/model/labelset.go
index 6eda08a7395..d0ad88da334 100644
--- a/vendor/github.com/prometheus/common/model/labelset.go
+++ b/vendor/github.com/prometheus/common/model/labelset.go
@@ -17,7 +17,6 @@ import (
"encoding/json"
"fmt"
"sort"
- "strings"
)
// A LabelSet is a collection of LabelName and LabelValue pairs. The LabelSet
@@ -129,16 +128,6 @@ func (l LabelSet) Merge(other LabelSet) LabelSet {
return result
}
-func (l LabelSet) String() string {
- lstrs := make([]string, 0, len(l))
- for l, v := range l {
- lstrs = append(lstrs, fmt.Sprintf("%s=%q", l, v))
- }
-
- sort.Strings(lstrs)
- return fmt.Sprintf("{%s}", strings.Join(lstrs, ", "))
-}
-
// Fingerprint returns the LabelSet's fingerprint.
func (ls LabelSet) Fingerprint() Fingerprint {
return labelSetToFingerprint(ls)
diff --git a/vendor/github.com/prometheus/common/model/labelset_string.go b/vendor/github.com/prometheus/common/model/labelset_string.go
new file mode 100644
index 00000000000..481c47b46e5
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/labelset_string.go
@@ -0,0 +1,45 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build go1.21
+
+package model
+
+import (
+ "bytes"
+ "slices"
+ "strconv"
+)
+
+// String will look like `{foo="bar", more="less"}`. Names are sorted alphabetically.
+func (l LabelSet) String() string {
+ var lna [32]string // On stack to avoid memory allocation for sorting names.
+ labelNames := lna[:0]
+ for name := range l {
+ labelNames = append(labelNames, string(name))
+ }
+ slices.Sort(labelNames)
+ var bytea [1024]byte // On stack to avoid memory allocation while building the output.
+ b := bytes.NewBuffer(bytea[:0])
+ b.WriteByte('{')
+ for i, name := range labelNames {
+ if i > 0 {
+ b.WriteString(", ")
+ }
+ b.WriteString(name)
+ b.WriteByte('=')
+ b.Write(strconv.AppendQuote(b.AvailableBuffer(), string(l[LabelName(name)])))
+ }
+ b.WriteByte('}')
+ return b.String()
+}
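Both String implementations (the go1.21 stack-buffered one above and the go1.20 fallback below) are meant to produce the same form; a tiny usage sketch with illustrative labels:

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	ls := model.LabelSet{"job": "api", "instance": "host:9090"} // illustrative
	// Names are sorted alphabetically, values are quoted.
	fmt.Println(ls) // {instance="host:9090", job="api"}
}
```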
diff --git a/vendor/github.com/prometheus/common/model/labelset_string_go120.go b/vendor/github.com/prometheus/common/model/labelset_string_go120.go
new file mode 100644
index 00000000000..c4212685e71
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/labelset_string_go120.go
@@ -0,0 +1,39 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !go1.21
+
+package model
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+)
+
+// String was optimized using functions not available for go 1.20
+// or lower. We keep the old implementation for compatibility with client_golang.
+// Once client_golang drops support for go 1.20 (scheduled for August 2024), this
+// file can be removed.
+func (l LabelSet) String() string {
+ labelNames := make([]string, 0, len(l))
+ for name := range l {
+ labelNames = append(labelNames, string(name))
+ }
+ sort.Strings(labelNames)
+ lstrs := make([]string, 0, len(l))
+ for _, name := range labelNames {
+ lstrs = append(lstrs, fmt.Sprintf("%s=%q", name, l[LabelName(name)]))
+ }
+ return fmt.Sprintf("{%s}", strings.Join(lstrs, ", "))
+}
diff --git a/vendor/github.com/prometheus/common/model/metadata.go b/vendor/github.com/prometheus/common/model/metadata.go
new file mode 100644
index 00000000000..447ab8ad635
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/metadata.go
@@ -0,0 +1,28 @@
+// Copyright 2023 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+// MetricType represents metric type values.
+type MetricType string
+
+const (
+ MetricTypeCounter = MetricType("counter")
+ MetricTypeGauge = MetricType("gauge")
+ MetricTypeHistogram = MetricType("histogram")
+ MetricTypeGaugeHistogram = MetricType("gaugehistogram")
+ MetricTypeSummary = MetricType("summary")
+ MetricTypeInfo = MetricType("info")
+ MetricTypeStateset = MetricType("stateset")
+ MetricTypeUnknown = MetricType("unknown")
+)
diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go
index 00804b7fedb..eb865e5a59c 100644
--- a/vendor/github.com/prometheus/common/model/metric.go
+++ b/vendor/github.com/prometheus/common/model/metric.go
@@ -18,15 +18,84 @@ import (
"regexp"
"sort"
"strings"
+ "unicode/utf8"
+
+ dto "github.com/prometheus/client_model/go"
+ "google.golang.org/protobuf/proto"
)
var (
- // MetricNameRE is a regular expression matching valid metric
- // names. Note that the IsValidMetricName function performs the same
- // check but faster than a match with this regular expression.
- MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`)
+ // NameValidationScheme determines the method of name validation to be used by
+ // all calls to IsValidMetricName() and LabelName IsValid(). Setting UTF-8 mode
+ // in isolation from other components that don't support UTF-8 may result in
+ // bugs or other undefined behavior. This value is intended to be set by
+ // UTF-8-aware binaries as part of their startup. To avoid need for locking,
+ // this value should be set once, ideally in an init(), before multiple
+ // goroutines are started.
+ NameValidationScheme = LegacyValidation
+
+ // NameEscapingScheme defines the default way that names will be
+ // escaped when presented to systems that do not support UTF-8 names. If the
+ // Content-Type "escaping" term is specified, that will override this value.
+ NameEscapingScheme = ValueEncodingEscaping
+)
+
+// ValidationScheme is a Go enum for determining how metric and label names will
+// be validated by this library.
+type ValidationScheme int
+
+const (
+ // LegacyValidation is a setting that requires that metric and label names
+ // conform to the original Prometheus character requirements described by
+ // MetricNameRE and LabelNameRE.
+ LegacyValidation ValidationScheme = iota
+
+ // UTF8Validation only requires that metric and label names be valid UTF-8
+ // strings.
+ UTF8Validation
+)
+
+type EscapingScheme int
+
+const (
+ // NoEscaping indicates that a name will not be escaped. Unescaped names that
+ // do not conform to the legacy validity check will use a new exposition
+ // format syntax that will be officially standardized in future versions.
+ NoEscaping EscapingScheme = iota
+
+ // UnderscoreEscaping replaces all legacy-invalid characters with underscores.
+ UnderscoreEscaping
+
+ // DotsEscaping is similar to UnderscoreEscaping, except that dots are
+ // converted to `_dot_` and pre-existing underscores are converted to `__`.
+ DotsEscaping
+
+ // ValueEncodingEscaping prepends the name with `U__` and replaces all invalid
+ // characters with the unicode value, surrounded by underscores. Single
+ // underscores are replaced with double underscores.
+ ValueEncodingEscaping
+)
+
+const (
+ // EscapingKey is the key in an Accept or Content-Type header that defines how
+ // metric and label names that do not conform to the legacy character
+ // requirements should be escaped when being scraped by a legacy prometheus
+ // system. If a system does not explicitly pass an escaping parameter in the
+ // Accept header, the default NameEscapingScheme will be used.
+ EscapingKey = "escaping"
+
+ // Possible values for Escaping Key:
+ AllowUTF8 = "allow-utf-8" // No escaping required.
+ EscapeUnderscores = "underscores"
+ EscapeDots = "dots"
+ EscapeValues = "values"
)
+// MetricNameRE is a regular expression matching valid metric
+// names. Note that the IsValidMetricName function performs the same
+// check but faster than a match with this regular expression.
+var MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`)
+
// A Metric is similar to a LabelSet, but the key difference is that a Metric is
// a singleton and refers to one and only one stream of samples.
type Metric LabelSet
@@ -86,17 +155,303 @@ func (m Metric) FastFingerprint() Fingerprint {
return LabelSet(m).FastFingerprint()
}
-// IsValidMetricName returns true iff name matches the pattern of MetricNameRE.
+// IsValidMetricName returns true iff name matches the pattern of MetricNameRE
+// under the legacy validation scheme, or iff name is valid UTF-8 when the
+// UTF8Validation scheme is selected.
+func IsValidMetricName(n LabelValue) bool {
+ switch NameValidationScheme {
+ case LegacyValidation:
+ return IsValidLegacyMetricName(n)
+ case UTF8Validation:
+ if len(n) == 0 {
+ return false
+ }
+ return utf8.ValidString(string(n))
+ default:
+ panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme))
+ }
+}
+
+// IsValidLegacyMetricName is similar to IsValidMetricName but always uses the
+// legacy validation scheme regardless of the value of NameValidationScheme.
// This function, however, does not use MetricNameRE for the check but a much
// faster hardcoded implementation.
-func IsValidMetricName(n LabelValue) bool {
+func IsValidLegacyMetricName(n LabelValue) bool {
if len(n) == 0 {
return false
}
for i, b := range n {
- if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)) {
+ if !isValidLegacyRune(b, i) {
return false
}
}
return true
}
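
For illustration (not part of the patch): a sketch of how the two entry points diverge once UTF-8 mode is on; the metric names are invented.

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	// Default LegacyValidation: both checks agree.
	fmt.Println(model.IsValidMetricName("http_requests_total")) // true
	fmt.Println(model.IsValidMetricName("http.requests.total")) // false

	// In UTF-8 mode only the scheme-aware check loosens up;
	// IsValidLegacyMetricName stays strict.
	model.NameValidationScheme = model.UTF8Validation
	fmt.Println(model.IsValidMetricName("http.requests.total"))       // true
	fmt.Println(model.IsValidLegacyMetricName("http.requests.total")) // false
}
```
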
+
+// EscapeMetricFamily escapes the given metric names and labels with the given
+// escaping scheme. Returns a new object that uses the same pointers to fields
+// when possible and creates new escaped versions so as not to mutate the
+// input.
+func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricFamily {
+ if v == nil {
+ return nil
+ }
+
+ if scheme == NoEscaping {
+ return v
+ }
+
+ out := &dto.MetricFamily{
+ Help: v.Help,
+ Type: v.Type,
+ Unit: v.Unit,
+ }
+
+ // If the name is nil, copy as-is, don't try to escape.
+ if v.Name == nil || IsValidLegacyMetricName(LabelValue(v.GetName())) {
+ out.Name = v.Name
+ } else {
+ out.Name = proto.String(EscapeName(v.GetName(), scheme))
+ }
+ for _, m := range v.Metric {
+ if !metricNeedsEscaping(m) {
+ out.Metric = append(out.Metric, m)
+ continue
+ }
+
+ escaped := &dto.Metric{
+ Gauge: m.Gauge,
+ Counter: m.Counter,
+ Summary: m.Summary,
+ Untyped: m.Untyped,
+ Histogram: m.Histogram,
+ TimestampMs: m.TimestampMs,
+ }
+
+ for _, l := range m.Label {
+ if l.GetName() == MetricNameLabel {
+ if l.Value == nil || IsValidLegacyMetricName(LabelValue(l.GetValue())) {
+ escaped.Label = append(escaped.Label, l)
+ continue
+ }
+ escaped.Label = append(escaped.Label, &dto.LabelPair{
+ Name: proto.String(MetricNameLabel),
+ Value: proto.String(EscapeName(l.GetValue(), scheme)),
+ })
+ continue
+ }
+ if l.Name == nil || IsValidLegacyMetricName(LabelValue(l.GetName())) {
+ escaped.Label = append(escaped.Label, l)
+ continue
+ }
+ escaped.Label = append(escaped.Label, &dto.LabelPair{
+ Name: proto.String(EscapeName(l.GetName(), scheme)),
+ Value: l.Value,
+ })
+ }
+ out.Metric = append(out.Metric, escaped)
+ }
+ return out
+}
+
+func metricNeedsEscaping(m *dto.Metric) bool {
+ for _, l := range m.Label {
+ if l.GetName() == MetricNameLabel && !IsValidLegacyMetricName(LabelValue(l.GetValue())) {
+ return true
+ }
+ if !IsValidLegacyMetricName(LabelValue(l.GetName())) {
+ return true
+ }
+ }
+ return false
+}
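
For illustration (not part of the patch): a sketch of EscapeMetricFamily in use, with a hypothetical dotted family name. Note the input proto is left untouched, matching the doc comment above.

```go
package main

import (
	"fmt"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/model"
	"google.golang.org/protobuf/proto"
)

func main() {
	fam := &dto.MetricFamily{Name: proto.String("http.requests.total")}
	escaped := model.EscapeMetricFamily(fam, model.UnderscoreEscaping)
	fmt.Println(escaped.GetName()) // http_requests_total
	fmt.Println(fam.GetName())     // http.requests.total (input is not mutated)
}
```
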
+
+const (
+ lowerhex = "0123456789abcdef"
+)
+
+// EscapeName escapes the incoming name according to the provided escaping
+// scheme. Depending on the rules of escaping, this may cause no change in the
+// string that is returned (especially NoEscaping, which by definition is a
+// no-op). This function does not do any validation of the name.
+func EscapeName(name string, scheme EscapingScheme) string {
+ if len(name) == 0 {
+ return name
+ }
+ var escaped strings.Builder
+ switch scheme {
+ case NoEscaping:
+ return name
+ case UnderscoreEscaping:
+ if IsValidLegacyMetricName(LabelValue(name)) {
+ return name
+ }
+ for i, b := range name {
+ if isValidLegacyRune(b, i) {
+ escaped.WriteRune(b)
+ } else {
+ escaped.WriteRune('_')
+ }
+ }
+ return escaped.String()
+ case DotsEscaping:
+ // Do not early return for legacy valid names, we still escape underscores.
+ for i, b := range name {
+ if b == '_' {
+ escaped.WriteString("__")
+ } else if b == '.' {
+ escaped.WriteString("_dot_")
+ } else if isValidLegacyRune(b, i) {
+ escaped.WriteRune(b)
+ } else {
+ escaped.WriteRune('_')
+ }
+ }
+ return escaped.String()
+ case ValueEncodingEscaping:
+ if IsValidLegacyMetricName(LabelValue(name)) {
+ return name
+ }
+ escaped.WriteString("U__")
+ for i, b := range name {
+ if isValidLegacyRune(b, i) {
+ escaped.WriteRune(b)
+ } else if !utf8.ValidRune(b) {
+ escaped.WriteString("_FFFD_")
+ } else if b < 0x100 {
+ escaped.WriteRune('_')
+ for s := 4; s >= 0; s -= 4 {
+ escaped.WriteByte(lowerhex[b>>uint(s)&0xF])
+ }
+ escaped.WriteRune('_')
+ } else if b < 0x10000 {
+ escaped.WriteRune('_')
+ for s := 12; s >= 0; s -= 4 {
+ escaped.WriteByte(lowerhex[b>>uint(s)&0xF])
+ }
+ escaped.WriteRune('_')
+ }
+ }
+ return escaped.String()
+ default:
+ panic(fmt.Sprintf("invalid escaping scheme %d", scheme))
+ }
+}
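
For illustration (not part of the patch): what the three escaping schemes do to one invented name.

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	name := "my.noisy.metric" // '.' is not legacy-valid
	fmt.Println(model.EscapeName(name, model.UnderscoreEscaping))    // my_noisy_metric
	fmt.Println(model.EscapeName(name, model.DotsEscaping))          // my_dot_noisy_dot_metric
	fmt.Println(model.EscapeName(name, model.ValueEncodingEscaping)) // U__my_2e_noisy_2e_metric
}
```
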
+
+// lower function taken from strconv.atoi
+func lower(c byte) byte {
+ return c | ('x' - 'X')
+}
+
+// UnescapeName unescapes the incoming name according to the provided escaping
+// scheme if possible. Some schemes are partially or totally non-roundtrippable.
+// If any error is encountered, the original input is returned.
+func UnescapeName(name string, scheme EscapingScheme) string {
+ if len(name) == 0 {
+ return name
+ }
+ switch scheme {
+ case NoEscaping:
+ return name
+ case UnderscoreEscaping:
+ // It is not possible to unescape from underscore replacement.
+ return name
+ case DotsEscaping:
+ name = strings.ReplaceAll(name, "_dot_", ".")
+ name = strings.ReplaceAll(name, "__", "_")
+ return name
+ case ValueEncodingEscaping:
+ escapedName, found := strings.CutPrefix(name, "U__")
+ if !found {
+ return name
+ }
+
+ var unescaped strings.Builder
+ TOP:
+ for i := 0; i < len(escapedName); i++ {
+ // All non-underscores are treated normally.
+ if escapedName[i] != '_' {
+ unescaped.WriteByte(escapedName[i])
+ continue
+ }
+ i++
+ if i >= len(escapedName) {
+ return name
+ }
+ // A double underscore is a single underscore.
+ if escapedName[i] == '_' {
+ unescaped.WriteByte('_')
+ continue
+ }
+ // We think we are in an escaped UTF-8 code point; process it.
+ var utf8Val uint
+ for j := 0; i < len(escapedName); j++ {
+ // This is too many characters for a utf8 value.
+ if j > 4 {
+ return name
+ }
+ // Found a closing underscore, convert to a rune, check validity, and append.
+ if escapedName[i] == '_' {
+ utf8Rune := rune(utf8Val)
+ if !utf8.ValidRune(utf8Rune) {
+ return name
+ }
+ unescaped.WriteRune(utf8Rune)
+ continue TOP
+ }
+ r := lower(escapedName[i])
+ utf8Val *= 16
+ if r >= '0' && r <= '9' {
+ utf8Val += uint(r) - '0'
+ } else if r >= 'a' && r <= 'f' {
+ utf8Val += uint(r) - 'a' + 10
+ } else {
+ return name
+ }
+ i++
+ }
+ // Didn't find closing underscore, invalid.
+ return name
+ }
+ return unescaped.String()
+ default:
+ panic(fmt.Sprintf("invalid escaping scheme %d", scheme))
+ }
+}
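
For illustration (not part of the patch): a round-trip sketch. ValueEncodingEscaping is fully reversible, DotsEscaping is mostly reversible, and UnderscoreEscaping is deliberately returned as-is. The name here is invented.

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	orig := "device.io/errors"
	esc := model.EscapeName(orig, model.ValueEncodingEscaping)
	fmt.Println(esc) // U__device_2e_io_2f_errors
	fmt.Println(model.UnescapeName(esc, model.ValueEncodingEscaping)) // device.io/errors
}
```
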
+
+func isValidLegacyRune(b rune, i int) bool {
+ return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)
+}
+
+func (e EscapingScheme) String() string {
+ switch e {
+ case NoEscaping:
+ return AllowUTF8
+ case UnderscoreEscaping:
+ return EscapeUnderscores
+ case DotsEscaping:
+ return EscapeDots
+ case ValueEncodingEscaping:
+ return EscapeValues
+ default:
+ panic(fmt.Sprintf("unknown format scheme %d", e))
+ }
+}
+
+func ToEscapingScheme(s string) (EscapingScheme, error) {
+ if s == "" {
+ return NoEscaping, fmt.Errorf("got empty string instead of escaping scheme")
+ }
+ switch s {
+ case AllowUTF8:
+ return NoEscaping, nil
+ case EscapeUnderscores:
+ return UnderscoreEscaping, nil
+ case EscapeDots:
+ return DotsEscaping, nil
+ case EscapeValues:
+ return ValueEncodingEscaping, nil
+ default:
+ return NoEscaping, fmt.Errorf("unknown format scheme %s", s)
+ }
+}
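
For illustration (not part of the patch): a sketch of how a scrape handler might combine the EscapingKey header parameter with the package default; the parameter value here is invented.

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	// Pretend this came out of the Accept header's "escaping" parameter.
	param := "underscores"
	scheme, err := model.ToEscapingScheme(param)
	if err != nil {
		// Missing or unknown parameter: fall back to the package default.
		scheme = model.NameEscapingScheme
	}
	fmt.Println(scheme) // underscores
}
```
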
diff --git a/vendor/github.com/prometheus/common/model/signature.go b/vendor/github.com/prometheus/common/model/signature.go
index 8762b13c63d..dc8a0026c48 100644
--- a/vendor/github.com/prometheus/common/model/signature.go
+++ b/vendor/github.com/prometheus/common/model/signature.go
@@ -22,10 +22,8 @@ import (
// when calculating their combined hash value (aka signature aka fingerprint).
const SeparatorByte byte = 255
-var (
- // cache the signature of an empty label set.
- emptyLabelSignature = hashNew()
-)
+// cache the signature of an empty label set.
+var emptyLabelSignature = hashNew()
// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a
// given label set. (Collisions are possible but unlikely if the number of label
diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go
index bb99889d2cc..910b0b71fcc 100644
--- a/vendor/github.com/prometheus/common/model/silence.go
+++ b/vendor/github.com/prometheus/common/model/silence.go
@@ -81,7 +81,7 @@ func (s *Silence) Validate() error {
}
for _, m := range s.Matchers {
if err := m.Validate(); err != nil {
- return fmt.Errorf("invalid matcher: %s", err)
+ return fmt.Errorf("invalid matcher: %w", err)
}
}
if s.StartsAt.IsZero() {
diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go
index 7f67b16e429..5727452c1ee 100644
--- a/vendor/github.com/prometheus/common/model/time.go
+++ b/vendor/github.com/prometheus/common/model/time.go
@@ -18,7 +18,6 @@ import (
"errors"
"fmt"
"math"
- "regexp"
"strconv"
"strings"
"time"
@@ -183,54 +182,78 @@ func (d *Duration) Type() string {
return "duration"
}
-var durationRE = regexp.MustCompile("^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$")
+func isdigit(c byte) bool { return c >= '0' && c <= '9' }
+
+// Units are required to go in order from biggest to smallest.
+// This guards against confusion from "1m1d" being 1 minute + 1 day, not 1 month + 1 day.
+var unitMap = map[string]struct {
+ pos int
+ mult uint64
+}{
+ "ms": {7, uint64(time.Millisecond)},
+ "s": {6, uint64(time.Second)},
+ "m": {5, uint64(time.Minute)},
+ "h": {4, uint64(time.Hour)},
+ "d": {3, uint64(24 * time.Hour)},
+ "w": {2, uint64(7 * 24 * time.Hour)},
+ "y": {1, uint64(365 * 24 * time.Hour)},
+}
// ParseDuration parses a string into a time.Duration, assuming that a year
// always has 365d, a week always has 7d, and a day always has 24h.
-func ParseDuration(durationStr string) (Duration, error) {
- switch durationStr {
+func ParseDuration(s string) (Duration, error) {
+ switch s {
case "0":
// Allow 0 without a unit.
return 0, nil
case "":
- return 0, fmt.Errorf("empty duration string")
- }
- matches := durationRE.FindStringSubmatch(durationStr)
- if matches == nil {
- return 0, fmt.Errorf("not a valid duration string: %q", durationStr)
+ return 0, errors.New("empty duration string")
}
- var dur time.Duration
- // Parse the match at pos `pos` in the regex and use `mult` to turn that
- // into ms, then add that value to the total parsed duration.
- var overflowErr error
- m := func(pos int, mult time.Duration) {
- if matches[pos] == "" {
- return
+ orig := s
+ var dur uint64
+ lastUnitPos := 0
+
+ for s != "" {
+ if !isdigit(s[0]) {
+ return 0, fmt.Errorf("not a valid duration string: %q", orig)
+ }
+ // Consume [0-9]*
+ i := 0
+ for ; i < len(s) && isdigit(s[i]); i++ {
+ }
+ v, err := strconv.ParseUint(s[:i], 10, 0)
+ if err != nil {
+ return 0, fmt.Errorf("not a valid duration string: %q", orig)
}
- n, _ := strconv.Atoi(matches[pos])
+ s = s[i:]
+ // Consume unit.
+ for i = 0; i < len(s) && !isdigit(s[i]); i++ {
+ }
+ if i == 0 {
+ return 0, fmt.Errorf("not a valid duration string: %q", orig)
+ }
+ u := s[:i]
+ s = s[i:]
+ unit, ok := unitMap[u]
+ if !ok {
+ return 0, fmt.Errorf("unknown unit %q in duration %q", u, orig)
+ }
+ if unit.pos <= lastUnitPos { // Units must go in order from biggest to smallest.
+ return 0, fmt.Errorf("not a valid duration string: %q", orig)
+ }
+ lastUnitPos = unit.pos
// Check if the provided duration overflows time.Duration (> ~ 290years).
- if n > int((1<<63-1)/mult/time.Millisecond) {
- overflowErr = errors.New("duration out of range")
+ if v > 1<<63/unit.mult {
+ return 0, errors.New("duration out of range")
}
- d := time.Duration(n) * time.Millisecond
- dur += d * mult
-
- if dur < 0 {
- overflowErr = errors.New("duration out of range")
+ dur += v * unit.mult
+ if dur > 1<<63-1 {
+ return 0, errors.New("duration out of range")
}
}
-
- m(2, 1000*60*60*24*365) // y
- m(4, 1000*60*60*24*7) // w
- m(6, 1000*60*60*24) // d
- m(8, 1000*60*60) // h
- m(10, 1000*60) // m
- m(12, 1000) // s
- m(14, 1) // ms
-
- return Duration(dur), overflowErr
+ return Duration(dur), nil
}
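
For illustration (not part of the patch): the regexp-free rewrite accepts the same grammar but now rejects units out of big-to-small order, as the unitMap comment explains.

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	d, err := model.ParseDuration("1h30m")
	fmt.Println(d, err) // 1h30m <nil>

	// Units out of order (minutes before hours) are rejected.
	_, err = model.ParseDuration("30m1h")
	fmt.Println(err) // not a valid duration string: "30m1h"
}
```
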
func (d Duration) String() string {
diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go
index c9d8fb1a283..8050637d822 100644
--- a/vendor/github.com/prometheus/common/model/value.go
+++ b/vendor/github.com/prometheus/common/model/value.go
@@ -16,104 +16,26 @@ package model
import (
"encoding/json"
"fmt"
- "math"
"sort"
"strconv"
"strings"
)
-var (
- // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a
- // non-existing sample pair. It is a SamplePair with timestamp Earliest and
- // value 0.0. Note that the natural zero value of SamplePair has a timestamp
- // of 0, which is possible to appear in a real SamplePair and thus not
- // suitable to signal a non-existing SamplePair.
- ZeroSamplePair = SamplePair{Timestamp: Earliest}
-
- // ZeroSample is the pseudo zero-value of Sample used to signal a
- // non-existing sample. It is a Sample with timestamp Earliest, value 0.0,
- // and metric nil. Note that the natural zero value of Sample has a timestamp
- // of 0, which is possible to appear in a real Sample and thus not suitable
- // to signal a non-existing Sample.
- ZeroSample = Sample{Timestamp: Earliest}
-)
-
-// A SampleValue is a representation of a value for a given sample at a given
-// time.
-type SampleValue float64
-
-// MarshalJSON implements json.Marshaler.
-func (v SampleValue) MarshalJSON() ([]byte, error) {
- return json.Marshal(v.String())
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (v *SampleValue) UnmarshalJSON(b []byte) error {
- if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
- return fmt.Errorf("sample value must be a quoted string")
- }
- f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
- if err != nil {
- return err
- }
- *v = SampleValue(f)
- return nil
-}
-
-// Equal returns true if the value of v and o is equal or if both are NaN. Note
-// that v==o is false if both are NaN. If you want the conventional float
-// behavior, use == to compare two SampleValues.
-func (v SampleValue) Equal(o SampleValue) bool {
- if v == o {
- return true
- }
- return math.IsNaN(float64(v)) && math.IsNaN(float64(o))
-}
-
-func (v SampleValue) String() string {
- return strconv.FormatFloat(float64(v), 'f', -1, 64)
-}
-
-// SamplePair pairs a SampleValue with a Timestamp.
-type SamplePair struct {
- Timestamp Time
- Value SampleValue
-}
-
-// MarshalJSON implements json.Marshaler.
-func (s SamplePair) MarshalJSON() ([]byte, error) {
- t, err := json.Marshal(s.Timestamp)
- if err != nil {
- return nil, err
- }
- v, err := json.Marshal(s.Value)
- if err != nil {
- return nil, err
- }
- return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (s *SamplePair) UnmarshalJSON(b []byte) error {
- v := [...]json.Unmarshaler{&s.Timestamp, &s.Value}
- return json.Unmarshal(b, &v)
-}
-
-// Equal returns true if this SamplePair and o have equal Values and equal
-// Timestamps. The semantics of Value equality is defined by SampleValue.Equal.
-func (s *SamplePair) Equal(o *SamplePair) bool {
- return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp))
-}
-
-func (s SamplePair) String() string {
- return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp)
-}
+// ZeroSample is the pseudo zero-value of Sample used to signal a
+// non-existing sample. It is a Sample with timestamp Earliest, value 0.0,
+// and metric nil. Note that the natural zero value of Sample has a timestamp
+// of 0, which is possible to appear in a real Sample and thus not suitable
+// to signal a non-existing Sample.
+var ZeroSample = Sample{Timestamp: Earliest}
-// Sample is a sample pair associated with a metric.
+// Sample is a sample pair associated with a metric. A single sample must either
+// define Value or Histogram but not both. If Histogram == nil, the Value field
+// is used; otherwise Value should be ignored.
type Sample struct {
- Metric Metric `json:"metric"`
- Value SampleValue `json:"value"`
- Timestamp Time `json:"timestamp"`
+ Metric Metric `json:"metric"`
+ Value SampleValue `json:"value"`
+ Timestamp Time `json:"timestamp"`
+ Histogram *SampleHistogram `json:"histogram"`
}
// Equal compares first the metrics, then the timestamp, then the value. The
@@ -129,11 +51,19 @@ func (s *Sample) Equal(o *Sample) bool {
if !s.Timestamp.Equal(o.Timestamp) {
return false
}
-
+ if s.Histogram != nil {
+ return s.Histogram.Equal(o.Histogram)
+ }
return s.Value.Equal(o.Value)
}
func (s Sample) String() string {
+ if s.Histogram != nil {
+ return fmt.Sprintf("%s => %s", s.Metric, SampleHistogramPair{
+ Timestamp: s.Timestamp,
+ Histogram: s.Histogram,
+ })
+ }
return fmt.Sprintf("%s => %s", s.Metric, SamplePair{
Timestamp: s.Timestamp,
Value: s.Value,
@@ -142,6 +72,19 @@ func (s Sample) String() string {
// MarshalJSON implements json.Marshaler.
func (s Sample) MarshalJSON() ([]byte, error) {
+ if s.Histogram != nil {
+ v := struct {
+ Metric Metric `json:"metric"`
+ Histogram SampleHistogramPair `json:"histogram"`
+ }{
+ Metric: s.Metric,
+ Histogram: SampleHistogramPair{
+ Timestamp: s.Timestamp,
+ Histogram: s.Histogram,
+ },
+ }
+ return json.Marshal(&v)
+ }
v := struct {
Metric Metric `json:"metric"`
Value SamplePair `json:"value"`
@@ -152,21 +95,25 @@ func (s Sample) MarshalJSON() ([]byte, error) {
Value: s.Value,
},
}
-
return json.Marshal(&v)
}
// UnmarshalJSON implements json.Unmarshaler.
func (s *Sample) UnmarshalJSON(b []byte) error {
v := struct {
- Metric Metric `json:"metric"`
- Value SamplePair `json:"value"`
+ Metric Metric `json:"metric"`
+ Value SamplePair `json:"value"`
+ Histogram SampleHistogramPair `json:"histogram"`
}{
Metric: s.Metric,
Value: SamplePair{
Timestamp: s.Timestamp,
Value: s.Value,
},
+ Histogram: SampleHistogramPair{
+ Timestamp: s.Timestamp,
+ Histogram: s.Histogram,
+ },
}
if err := json.Unmarshal(b, &v); err != nil {
@@ -174,8 +121,13 @@ func (s *Sample) UnmarshalJSON(b []byte) error {
}
s.Metric = v.Metric
- s.Timestamp = v.Value.Timestamp
- s.Value = v.Value.Value
+ if v.Histogram.Histogram != nil {
+ s.Timestamp = v.Histogram.Timestamp
+ s.Histogram = v.Histogram.Histogram
+ } else {
+ s.Timestamp = v.Value.Timestamp
+ s.Value = v.Value.Value
+ }
return nil
}
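
For illustration (not part of the patch): a sketch of the float wire shape, which is unchanged by this diff; a non-nil Histogram would switch the output to a histogram pair instead. The label set and timestamp are invented.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	s := model.Sample{
		Metric:    model.Metric{"__name__": "up"},
		Value:     1,
		Timestamp: 1700000000000, // milliseconds
	}
	b, _ := json.Marshal(s)
	fmt.Println(string(b)) // {"metric":{"__name__":"up"},"value":[1700000000,"1"]}
}
```
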
@@ -221,80 +173,76 @@ func (s Samples) Equal(o Samples) bool {
// SampleStream is a stream of Values belonging to an attached COWMetric.
type SampleStream struct {
- Metric Metric `json:"metric"`
- Values []SamplePair `json:"values"`
+ Metric Metric `json:"metric"`
+ Values []SamplePair `json:"values"`
+ Histograms []SampleHistogramPair `json:"histograms"`
}
func (ss SampleStream) String() string {
- vals := make([]string, len(ss.Values))
+ valuesLength := len(ss.Values)
+ vals := make([]string, valuesLength+len(ss.Histograms))
for i, v := range ss.Values {
vals[i] = v.String()
}
+ for i, v := range ss.Histograms {
+ vals[i+valuesLength] = v.String()
+ }
return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n"))
}
-// Value is a generic interface for values resulting from a query evaluation.
-type Value interface {
- Type() ValueType
- String() string
+func (ss SampleStream) MarshalJSON() ([]byte, error) {
+ if len(ss.Histograms) > 0 && len(ss.Values) > 0 {
+ v := struct {
+ Metric Metric `json:"metric"`
+ Values []SamplePair `json:"values"`
+ Histograms []SampleHistogramPair `json:"histograms"`
+ }{
+ Metric: ss.Metric,
+ Values: ss.Values,
+ Histograms: ss.Histograms,
+ }
+ return json.Marshal(&v)
+ } else if len(ss.Histograms) > 0 {
+ v := struct {
+ Metric Metric `json:"metric"`
+ Histograms []SampleHistogramPair `json:"histograms"`
+ }{
+ Metric: ss.Metric,
+ Histograms: ss.Histograms,
+ }
+ return json.Marshal(&v)
+ } else {
+ v := struct {
+ Metric Metric `json:"metric"`
+ Values []SamplePair `json:"values"`
+ }{
+ Metric: ss.Metric,
+ Values: ss.Values,
+ }
+ return json.Marshal(&v)
+ }
}
-func (Matrix) Type() ValueType { return ValMatrix }
-func (Vector) Type() ValueType { return ValVector }
-func (*Scalar) Type() ValueType { return ValScalar }
-func (*String) Type() ValueType { return ValString }
-
-type ValueType int
-
-const (
- ValNone ValueType = iota
- ValScalar
- ValVector
- ValMatrix
- ValString
-)
-
-// MarshalJSON implements json.Marshaler.
-func (et ValueType) MarshalJSON() ([]byte, error) {
- return json.Marshal(et.String())
-}
+func (ss *SampleStream) UnmarshalJSON(b []byte) error {
+ v := struct {
+ Metric Metric `json:"metric"`
+ Values []SamplePair `json:"values"`
+ Histograms []SampleHistogramPair `json:"histograms"`
+ }{
+ Metric: ss.Metric,
+ Values: ss.Values,
+ Histograms: ss.Histograms,
+ }
-func (et *ValueType) UnmarshalJSON(b []byte) error {
- var s string
- if err := json.Unmarshal(b, &s); err != nil {
+ if err := json.Unmarshal(b, &v); err != nil {
return err
}
- switch s {
- case "":
- *et = ValNone
- case "scalar":
- *et = ValScalar
- case "vector":
- *et = ValVector
- case "matrix":
- *et = ValMatrix
- case "string":
- *et = ValString
- default:
- return fmt.Errorf("unknown value type %q", s)
- }
- return nil
-}
-func (e ValueType) String() string {
- switch e {
- case ValNone:
- return ""
- case ValScalar:
- return "scalar"
- case ValVector:
- return "vector"
- case ValMatrix:
- return "matrix"
- case ValString:
- return "string"
- }
- panic("ValueType.String: unhandled value type")
+ ss.Metric = v.Metric
+ ss.Values = v.Values
+ ss.Histograms = v.Histograms
+
+ return nil
}
// Scalar is a scalar value evaluated at the set timestamp.
@@ -324,7 +272,7 @@ func (s *Scalar) UnmarshalJSON(b []byte) error {
value, err := strconv.ParseFloat(f, 64)
if err != nil {
- return fmt.Errorf("error parsing sample value: %s", err)
+ return fmt.Errorf("error parsing sample value: %w", err)
}
s.Value = SampleValue(value)
return nil
diff --git a/vendor/github.com/prometheus/common/model/value_float.go b/vendor/github.com/prometheus/common/model/value_float.go
new file mode 100644
index 00000000000..ae35cc2ab4b
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/value_float.go
@@ -0,0 +1,98 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "math"
+ "strconv"
+)
+
+// ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a
+// non-existing sample pair. It is a SamplePair with timestamp Earliest and
+// value 0.0. Note that the natural zero value of SamplePair has a timestamp
+// of 0, which is possible to appear in a real SamplePair and thus not
+// suitable to signal a non-existing SamplePair.
+var ZeroSamplePair = SamplePair{Timestamp: Earliest}
+
+// A SampleValue is a representation of a value for a given sample at a given
+// time.
+type SampleValue float64
+
+// MarshalJSON implements json.Marshaler.
+func (v SampleValue) MarshalJSON() ([]byte, error) {
+ return json.Marshal(v.String())
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (v *SampleValue) UnmarshalJSON(b []byte) error {
+ if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
+ return fmt.Errorf("sample value must be a quoted string")
+ }
+ f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
+ if err != nil {
+ return err
+ }
+ *v = SampleValue(f)
+ return nil
+}
+
+// Equal returns true if the value of v and o is equal or if both are NaN. Note
+// that v==o is false if both are NaN. If you want the conventional float
+// behavior, use == to compare two SampleValues.
+func (v SampleValue) Equal(o SampleValue) bool {
+ if v == o {
+ return true
+ }
+ return math.IsNaN(float64(v)) && math.IsNaN(float64(o))
+}
+
+func (v SampleValue) String() string {
+ return strconv.FormatFloat(float64(v), 'f', -1, 64)
+}
+
+// SamplePair pairs a SampleValue with a Timestamp.
+type SamplePair struct {
+ Timestamp Time
+ Value SampleValue
+}
+
+func (s SamplePair) MarshalJSON() ([]byte, error) {
+ t, err := json.Marshal(s.Timestamp)
+ if err != nil {
+ return nil, err
+ }
+ v, err := json.Marshal(s.Value)
+ if err != nil {
+ return nil, err
+ }
+ return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *SamplePair) UnmarshalJSON(b []byte) error {
+ v := [...]json.Unmarshaler{&s.Timestamp, &s.Value}
+ return json.Unmarshal(b, &v)
+}
+
+// Equal returns true if this SamplePair and o have equal Values and equal
+// Timestamps. The semantics of Value equality are defined by SampleValue.Equal.
+func (s *SamplePair) Equal(o *SamplePair) bool {
+ return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp))
+}
+
+func (s SamplePair) String() string {
+ return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp)
+}
diff --git a/vendor/github.com/prometheus/common/model/value_histogram.go b/vendor/github.com/prometheus/common/model/value_histogram.go
new file mode 100644
index 00000000000..54bb038cfff
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/value_histogram.go
@@ -0,0 +1,178 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// FloatString is a float64 that is encoded as a quoted string in JSON.
+type FloatString float64
+
+func (v FloatString) String() string {
+ return strconv.FormatFloat(float64(v), 'f', -1, 64)
+}
+
+func (v FloatString) MarshalJSON() ([]byte, error) {
+ return json.Marshal(v.String())
+}
+
+func (v *FloatString) UnmarshalJSON(b []byte) error {
+ if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
+ return fmt.Errorf("float value must be a quoted string")
+ }
+ f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
+ if err != nil {
+ return err
+ }
+ *v = FloatString(f)
+ return nil
+}
+
+// HistogramBucket is one bucket of a histogram, with boundary inclusivity
+// encoded in Boundaries.
+type HistogramBucket struct {
+ Boundaries int32
+ Lower FloatString
+ Upper FloatString
+ Count FloatString
+}
+
+func (s HistogramBucket) MarshalJSON() ([]byte, error) {
+ b, err := json.Marshal(s.Boundaries)
+ if err != nil {
+ return nil, err
+ }
+ l, err := json.Marshal(s.Lower)
+ if err != nil {
+ return nil, err
+ }
+ u, err := json.Marshal(s.Upper)
+ if err != nil {
+ return nil, err
+ }
+ c, err := json.Marshal(s.Count)
+ if err != nil {
+ return nil, err
+ }
+ return []byte(fmt.Sprintf("[%s,%s,%s,%s]", b, l, u, c)), nil
+}
+
+func (s *HistogramBucket) UnmarshalJSON(buf []byte) error {
+ tmp := []interface{}{&s.Boundaries, &s.Lower, &s.Upper, &s.Count}
+ wantLen := len(tmp)
+ if err := json.Unmarshal(buf, &tmp); err != nil {
+ return err
+ }
+ if gotLen := len(tmp); gotLen != wantLen {
+ return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen)
+ }
+ return nil
+}
+
+func (s *HistogramBucket) Equal(o *HistogramBucket) bool {
+ return s == o || (s.Boundaries == o.Boundaries && s.Lower == o.Lower && s.Upper == o.Upper && s.Count == o.Count)
+}
+
+func (b HistogramBucket) String() string {
+ var sb strings.Builder
+ lowerInclusive := b.Boundaries == 1 || b.Boundaries == 3
+ upperInclusive := b.Boundaries == 0 || b.Boundaries == 3
+ if lowerInclusive {
+ sb.WriteRune('[')
+ } else {
+ sb.WriteRune('(')
+ }
+ fmt.Fprintf(&sb, "%g,%g", b.Lower, b.Upper)
+ if upperInclusive {
+ sb.WriteRune(']')
+ } else {
+ sb.WriteRune(')')
+ }
+ fmt.Fprintf(&sb, ":%v", b.Count)
+ return sb.String()
+}
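
For illustration (not part of the patch): Boundaries is a small enum of interval inclusivity, which String renders directly; the values below are invented.

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	// Boundaries: 0 = (lower, upper], 1 = [lower, upper),
	// 2 = (lower, upper), 3 = [lower, upper].
	b := model.HistogramBucket{Boundaries: 0, Lower: 0.5, Upper: 1, Count: 7}
	fmt.Println(b.String()) // (0.5,1]:7
}
```
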
+
+// HistogramBuckets is a list of histogram buckets.
+type HistogramBuckets []*HistogramBucket
+
+func (s HistogramBuckets) Equal(o HistogramBuckets) bool {
+ if len(s) != len(o) {
+ return false
+ }
+
+ for i, bucket := range s {
+ if !bucket.Equal(o[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// SampleHistogram is a histogram value consisting of a count, a sum, and
+// buckets.
+type SampleHistogram struct {
+ Count FloatString `json:"count"`
+ Sum FloatString `json:"sum"`
+ Buckets HistogramBuckets `json:"buckets"`
+}
+
+func (s SampleHistogram) String() string {
+ return fmt.Sprintf("Count: %f, Sum: %f, Buckets: %v", s.Count, s.Sum, s.Buckets)
+}
+
+func (s *SampleHistogram) Equal(o *SampleHistogram) bool {
+ return s == o || (s.Count == o.Count && s.Sum == o.Sum && s.Buckets.Equal(o.Buckets))
+}
+
+// SampleHistogramPair pairs a SampleHistogram with a Timestamp.
+type SampleHistogramPair struct {
+ Timestamp Time
+ // Histogram should never be nil; it is only stored as a pointer for efficiency.
+ Histogram *SampleHistogram
+}
+
+func (s SampleHistogramPair) MarshalJSON() ([]byte, error) {
+ if s.Histogram == nil {
+ return nil, fmt.Errorf("histogram is nil")
+ }
+ t, err := json.Marshal(s.Timestamp)
+ if err != nil {
+ return nil, err
+ }
+ v, err := json.Marshal(s.Histogram)
+ if err != nil {
+ return nil, err
+ }
+ return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil
+}
+
+func (s *SampleHistogramPair) UnmarshalJSON(buf []byte) error {
+ tmp := []interface{}{&s.Timestamp, &s.Histogram}
+ wantLen := len(tmp)
+ if err := json.Unmarshal(buf, &tmp); err != nil {
+ return err
+ }
+ if gotLen := len(tmp); gotLen != wantLen {
+ return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen)
+ }
+ if s.Histogram == nil {
+ return fmt.Errorf("histogram is null")
+ }
+ return nil
+}
+
+func (s SampleHistogramPair) String() string {
+ return fmt.Sprintf("%s @[%s]", s.Histogram, s.Timestamp)
+}
+
+func (s *SampleHistogramPair) Equal(o *SampleHistogramPair) bool {
+ return s == o || (s.Histogram.Equal(o.Histogram) && s.Timestamp.Equal(o.Timestamp))
+}
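
For illustration (not part of the patch): the pair marshals as a two-element array, mirroring SamplePair; a sketch of the resulting JSON with invented numbers.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	p := model.SampleHistogramPair{
		Timestamp: 1700000000000,
		Histogram: &model.SampleHistogram{
			Count: 10,
			Sum:   9.5,
			Buckets: model.HistogramBuckets{
				{Boundaries: 0, Lower: 0, Upper: 1, Count: 10},
			},
		},
	}
	b, err := json.Marshal(p)
	fmt.Println(string(b), err)
	// [1700000000,{"count":"10","sum":"9.5","buckets":[[0,"0","1","10"]]}] <nil>
}
```
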
diff --git a/vendor/github.com/prometheus/common/model/value_type.go b/vendor/github.com/prometheus/common/model/value_type.go
new file mode 100644
index 00000000000..726c50ee638
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/value_type.go
@@ -0,0 +1,83 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+)
+
+// Value is a generic interface for values resulting from a query evaluation.
+type Value interface {
+ Type() ValueType
+ String() string
+}
+
+func (Matrix) Type() ValueType { return ValMatrix }
+func (Vector) Type() ValueType { return ValVector }
+func (*Scalar) Type() ValueType { return ValScalar }
+func (*String) Type() ValueType { return ValString }
+
+type ValueType int
+
+const (
+ ValNone ValueType = iota
+ ValScalar
+ ValVector
+ ValMatrix
+ ValString
+)
+
+// MarshalJSON implements json.Marshaler.
+func (et ValueType) MarshalJSON() ([]byte, error) {
+ return json.Marshal(et.String())
+}
+
+func (et *ValueType) UnmarshalJSON(b []byte) error {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ switch s {
+ case "":
+ *et = ValNone
+ case "scalar":
+ *et = ValScalar
+ case "vector":
+ *et = ValVector
+ case "matrix":
+ *et = ValMatrix
+ case "string":
+ *et = ValString
+ default:
+ return fmt.Errorf("unknown value type %q", s)
+ }
+ return nil
+}
+
+func (e ValueType) String() string {
+ switch e {
+ case ValNone:
+ return ""
+ case ValScalar:
+ return "scalar"
+ case ValVector:
+ return "vector"
+ case ValMatrix:
+ return "matrix"
+ case ValString:
+ return "string"
+ }
+ panic("ValueType.String: unhandled value type")
+}
diff --git a/vendor/github.com/prometheus/procfs/.golangci.yml b/vendor/github.com/prometheus/procfs/.golangci.yml
index a197699a1ee..126df9e67ac 100644
--- a/vendor/github.com/prometheus/procfs/.golangci.yml
+++ b/vendor/github.com/prometheus/procfs/.golangci.yml
@@ -1,8 +1,16 @@
---
linters:
enable:
+ - errcheck
- godot
+ - gosimple
+ - govet
+ - ineffassign
+ - misspell
- revive
+ - staticcheck
+ - testifylint
+ - unused
linter-settings:
godot:
@@ -10,3 +18,5 @@ linter-settings:
exclude:
# Ignore "See: URL"
- 'See:'
+ misspell:
+ locale: US
diff --git a/vendor/github.com/prometheus/procfs/MAINTAINERS.md b/vendor/github.com/prometheus/procfs/MAINTAINERS.md
index 56ba67d3e31..e00f3b365b6 100644
--- a/vendor/github.com/prometheus/procfs/MAINTAINERS.md
+++ b/vendor/github.com/prometheus/procfs/MAINTAINERS.md
@@ -1,2 +1,3 @@
* Johannes 'fish' Ziemke @discordianfish
-* Paul Gier @pgier
+* Paul Gier @pgier
+* Ben Kochie @SuperQ
diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common
index 6c8e3e21979..16172923506 100644
--- a/vendor/github.com/prometheus/procfs/Makefile.common
+++ b/vendor/github.com/prometheus/procfs/Makefile.common
@@ -49,25 +49,28 @@ endif
GOTEST := $(GO) test
GOTEST_DIR :=
ifneq ($(CIRCLE_JOB),)
-ifneq ($(shell which gotestsum),)
+ifneq ($(shell command -v gotestsum 2> /dev/null),)
GOTEST_DIR := test-results
GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml --
endif
endif
-PROMU_VERSION ?= 0.13.0
+PROMU_VERSION ?= 0.17.0
PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
+SKIP_GOLANGCI_LINT :=
GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v1.45.2
-# golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
+GOLANGCI_LINT_VERSION ?= v1.59.0
+# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
# windows isn't included here because of the path separator being different.
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
- ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386))
+ ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386 arm64))
# If we're in CI and there is an Actions file, that means the linter
# is being run in Actions, so we don't need to run it here.
- ifeq (,$(CIRCLE_JOB))
+ ifneq (,$(SKIP_GOLANGCI_LINT))
+ GOLANGCI_LINT :=
+ else ifeq (,$(CIRCLE_JOB))
GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
else ifeq (,$(wildcard .github/workflows/golangci-lint.yml))
GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
@@ -88,6 +91,8 @@ BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS))
PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS))
TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS))
+SANITIZED_DOCKER_IMAGE_TAG := $(subst +,-,$(DOCKER_IMAGE_TAG))
+
ifeq ($(GOHOSTARCH),amd64)
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows))
# Only supported on amd64
@@ -164,16 +169,20 @@ common-vet:
common-lint: $(GOLANGCI_LINT)
ifdef GOLANGCI_LINT
@echo ">> running golangci-lint"
-# 'go list' needs to be executed before staticcheck to prepopulate the modules cache.
-# Otherwise staticcheck might fail randomly for some reason not yet explained.
- $(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null
$(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs)
endif
+.PHONY: common-lint-fix
+common-lint-fix: $(GOLANGCI_LINT)
+ifdef GOLANGCI_LINT
+ @echo ">> running golangci-lint fix"
+ $(GOLANGCI_LINT) run --fix $(GOLANGCI_LINT_OPTS) $(pkgs)
+endif
+
.PHONY: common-yamllint
common-yamllint:
@echo ">> running yamllint on all YAML files in the repository"
-ifeq (, $(shell which yamllint))
+ifeq (, $(shell command -v yamllint 2> /dev/null))
@echo "yamllint not installed so skipping"
else
yamllint .
@@ -199,10 +208,14 @@ common-tarball: promu
@echo ">> building release tarball"
$(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR)
+.PHONY: common-docker-repo-name
+common-docker-repo-name:
+ @echo "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)"
+
.PHONY: common-docker $(BUILD_DOCKER_ARCHS)
common-docker: $(BUILD_DOCKER_ARCHS)
$(BUILD_DOCKER_ARCHS): common-docker-%:
- docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \
+ docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" \
-f $(DOCKERFILE_PATH) \
--build-arg ARCH="$*" \
--build-arg OS="linux" \
@@ -211,19 +224,19 @@ $(BUILD_DOCKER_ARCHS): common-docker-%:
.PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS)
common-docker-publish: $(PUBLISH_DOCKER_ARCHS)
$(PUBLISH_DOCKER_ARCHS): common-docker-publish-%:
- docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)"
+ docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)"
DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION)))
.PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS)
common-docker-tag-latest: $(TAG_DOCKER_ARCHS)
$(TAG_DOCKER_ARCHS): common-docker-tag-latest-%:
- docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest"
- docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)"
+ docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest"
+ docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)"
.PHONY: common-docker-manifest
common-docker-manifest:
- DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(DOCKER_IMAGE_TAG))
- DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)"
+ DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(SANITIZED_DOCKER_IMAGE_TAG))
+ DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)"
.PHONY: promu
promu: $(PROMU)
diff --git a/vendor/github.com/prometheus/procfs/README.md b/vendor/github.com/prometheus/procfs/README.md
index 43c37735a70..1224816c2ad 100644
--- a/vendor/github.com/prometheus/procfs/README.md
+++ b/vendor/github.com/prometheus/procfs/README.md
@@ -51,11 +51,11 @@ ensure the `fixtures` directory is up to date by removing the existing directory
extracting the ttar file using `make fixtures/.unpacked` or just `make test`.
```bash
-rm -rf fixtures
+rm -rf testdata/fixtures
make test
```
Next, make the required changes to the extracted files in the `fixtures` directory. When
the changes are complete, run `make update_fixtures` to create a new `fixtures.ttar` file
based on the updated `fixtures` directory. And finally, verify the changes using
-`git diff fixtures.ttar`.
+`git diff testdata/fixtures.ttar`.
diff --git a/vendor/github.com/prometheus/procfs/arp.go b/vendor/github.com/prometheus/procfs/arp.go
index 68f36e888f9..cdcc8a7ccc4 100644
--- a/vendor/github.com/prometheus/procfs/arp.go
+++ b/vendor/github.com/prometheus/procfs/arp.go
@@ -55,7 +55,7 @@ type ARPEntry struct {
func (fs FS) GatherARPEntries() ([]ARPEntry, error) {
data, err := os.ReadFile(fs.proc.Path("net/arp"))
if err != nil {
- return nil, fmt.Errorf("error reading arp %q: %w", fs.proc.Path("net/arp"), err)
+ return nil, fmt.Errorf("%w: error reading arp %s: %w", ErrFileRead, fs.proc.Path("net/arp"), err)
}
return parseARPEntries(data)
@@ -78,11 +78,11 @@ func parseARPEntries(data []byte) ([]ARPEntry, error) {
} else if width == expectedDataWidth {
entry, err := parseARPEntry(columns)
if err != nil {
- return []ARPEntry{}, fmt.Errorf("failed to parse ARP entry: %w", err)
+ return []ARPEntry{}, fmt.Errorf("%w: Failed to parse ARP entry: %v: %w", ErrFileParse, entry, err)
}
entries = append(entries, entry)
} else {
- return []ARPEntry{}, fmt.Errorf("%d columns were detected, but %d were expected", width, expectedDataWidth)
+ return []ARPEntry{}, fmt.Errorf("%w: %d columns found, but expected %d: %w", ErrFileParse, width, expectedDataWidth, err)
}
}
diff --git a/vendor/github.com/prometheus/procfs/buddyinfo.go b/vendor/github.com/prometheus/procfs/buddyinfo.go
index f5b7939b266..83807500908 100644
--- a/vendor/github.com/prometheus/procfs/buddyinfo.go
+++ b/vendor/github.com/prometheus/procfs/buddyinfo.go
@@ -55,18 +55,18 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {
parts := strings.Fields(line)
if len(parts) < 4 {
- return nil, fmt.Errorf("invalid number of fields when parsing buddyinfo")
+ return nil, fmt.Errorf("%w: Invalid number of fields, found: %v", ErrFileParse, parts)
}
- node := strings.TrimRight(parts[1], ",")
- zone := strings.TrimRight(parts[3], ",")
+ node := strings.TrimSuffix(parts[1], ",")
+ zone := strings.TrimSuffix(parts[3], ",")
arraySize := len(parts[4:])
if bucketCount == -1 {
bucketCount = arraySize
} else {
if bucketCount != arraySize {
- return nil, fmt.Errorf("mismatch in number of buddyinfo buckets, previous count %d, new count %d", bucketCount, arraySize)
+ return nil, fmt.Errorf("%w: mismatch in number of buddyinfo buckets, previous count %d, new count %d", ErrFileParse, bucketCount, arraySize)
}
}
@@ -74,7 +74,7 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {
for i := 0; i < arraySize; i++ {
sizes[i], err = strconv.ParseFloat(parts[i+4], 64)
if err != nil {
- return nil, fmt.Errorf("invalid value in buddyinfo: %w", err)
+ return nil, fmt.Errorf("%w: Invalid valid in buddyinfo: %f: %w", ErrFileParse, sizes[i], err)
}
}
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo.go b/vendor/github.com/prometheus/procfs/cpuinfo.go
index ff6b927da15..f0950bb4953 100644
--- a/vendor/github.com/prometheus/procfs/cpuinfo.go
+++ b/vendor/github.com/prometheus/procfs/cpuinfo.go
@@ -79,7 +79,7 @@ func parseCPUInfoX86(info []byte) ([]CPUInfo, error) {
// find the first "processor" line
firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
- return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
+ return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
v, err := strconv.ParseUint(field[1], 0, 32)
@@ -192,9 +192,10 @@ func parseCPUInfoARM(info []byte) ([]CPUInfo, error) {
scanner := bufio.NewScanner(bytes.NewReader(info))
firstLine := firstNonEmptyLine(scanner)
- match, _ := regexp.MatchString("^[Pp]rocessor", firstLine)
+ match, err := regexp.MatchString("^[Pp]rocessor", firstLine)
if !match || !strings.Contains(firstLine, ":") {
- return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
+ return nil, fmt.Errorf("%w: Cannot parse line: %q: %w", ErrFileParse, firstLine, err)
+
}
field := strings.SplitN(firstLine, ": ", 2)
cpuinfo := []CPUInfo{}
@@ -258,7 +259,7 @@ func parseCPUInfoS390X(info []byte) ([]CPUInfo, error) {
firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "vendor_id") || !strings.Contains(firstLine, ":") {
- return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
+ return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
cpuinfo := []CPUInfo{}
@@ -283,7 +284,7 @@ func parseCPUInfoS390X(info []byte) ([]CPUInfo, error) {
if strings.HasPrefix(line, "processor") {
match := cpuinfoS390XProcessorRegexp.FindStringSubmatch(line)
if len(match) < 2 {
- return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
+ return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
}
cpu := commonCPUInfo
v, err := strconv.ParseUint(match[1], 0, 32)
@@ -343,7 +344,7 @@ func parseCPUInfoMips(info []byte) ([]CPUInfo, error) {
// find the first "processor" line
firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") {
- return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
+ return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
cpuinfo := []CPUInfo{}
@@ -380,12 +381,48 @@ func parseCPUInfoMips(info []byte) ([]CPUInfo, error) {
return cpuinfo, nil
}
+func parseCPUInfoLoong(info []byte) ([]CPUInfo, error) {
+ scanner := bufio.NewScanner(bytes.NewReader(info))
+ // find the first "processor" line
+ firstLine := firstNonEmptyLine(scanner)
+ if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") {
+ return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
+ }
+ field := strings.SplitN(firstLine, ": ", 2)
+ cpuinfo := []CPUInfo{}
+ systemType := field[1]
+ i := 0
+ for scanner.Scan() {
+ line := scanner.Text()
+ if !strings.Contains(line, ":") {
+ continue
+ }
+ field := strings.SplitN(line, ": ", 2)
+ switch strings.TrimSpace(field[0]) {
+ case "processor":
+ v, err := strconv.ParseUint(field[1], 0, 32)
+ if err != nil {
+ return nil, err
+ }
+ i = int(v)
+ cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
+ cpuinfo[i].Processor = uint(v)
+ cpuinfo[i].VendorID = systemType
+ case "CPU Family":
+ cpuinfo[i].CPUFamily = field[1]
+ case "Model Name":
+ cpuinfo[i].ModelName = field[1]
+ }
+ }
+ return cpuinfo, nil
+}
+
func parseCPUInfoPPC(info []byte) ([]CPUInfo, error) {
scanner := bufio.NewScanner(bytes.NewReader(info))
firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
- return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
+ return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
v, err := strconv.ParseUint(field[1], 0, 32)
@@ -430,7 +467,7 @@ func parseCPUInfoRISCV(info []byte) ([]CPUInfo, error) {
firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
- return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
+ return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
v, err := strconv.ParseUint(field[1], 0, 32)
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go b/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go
similarity index 73%
rename from vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go
rename to vendor/github.com/prometheus/procfs/cpuinfo_loong64.go
index c318385cbed..d88442f0edf 100644
--- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go
@@ -1,10 +1,9 @@
-// Copyright 2013 Matt T. Proud
-//
+// Copyright 2022 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
-// http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -12,5 +11,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// Package pbutil provides record length-delimited Protocol Buffer streaming.
-package pbutil
+//go:build linux
+// +build linux
+
+package procfs
+
+var parseCPUInfo = parseCPUInfoLoong
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_others.go b/vendor/github.com/prometheus/procfs/cpuinfo_others.go
index ea41bf2ca1e..a6b2b3127cb 100644
--- a/vendor/github.com/prometheus/procfs/cpuinfo_others.go
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_others.go
@@ -11,8 +11,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-//go:build linux && !386 && !amd64 && !arm && !arm64 && !mips && !mips64 && !mips64le && !mipsle && !ppc64 && !ppc64le && !riscv64 && !s390x
-// +build linux,!386,!amd64,!arm,!arm64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x
+//go:build linux && !386 && !amd64 && !arm && !arm64 && !loong64 && !mips && !mips64 && !mips64le && !mipsle && !ppc64 && !ppc64le && !riscv64 && !s390x
+// +build linux,!386,!amd64,!arm,!arm64,!loong64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x
package procfs
diff --git a/vendor/github.com/prometheus/procfs/crypto.go b/vendor/github.com/prometheus/procfs/crypto.go
index 5048ad1f214..5f2a37a78b3 100644
--- a/vendor/github.com/prometheus/procfs/crypto.go
+++ b/vendor/github.com/prometheus/procfs/crypto.go
@@ -55,12 +55,13 @@ func (fs FS) Crypto() ([]Crypto, error) {
path := fs.proc.Path("crypto")
b, err := util.ReadFileNoStat(path)
if err != nil {
- return nil, fmt.Errorf("error reading crypto %q: %w", path, err)
+ return nil, fmt.Errorf("%w: Cannot read file %v: %w", ErrFileRead, b, err)
+
}
crypto, err := parseCrypto(bytes.NewReader(b))
if err != nil {
- return nil, fmt.Errorf("error parsing crypto %q: %w", path, err)
+ return nil, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, crypto, err)
}
return crypto, nil
@@ -83,7 +84,7 @@ func parseCrypto(r io.Reader) ([]Crypto, error) {
kv := strings.Split(text, ":")
if len(kv) != 2 {
- return nil, fmt.Errorf("malformed crypto line: %q", text)
+ return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, text)
}
k := strings.TrimSpace(kv[0])
diff --git a/vendor/github.com/prometheus/procfs/doc.go b/vendor/github.com/prometheus/procfs/doc.go
index d31a82600f6..f9d961e4417 100644
--- a/vendor/github.com/prometheus/procfs/doc.go
+++ b/vendor/github.com/prometheus/procfs/doc.go
@@ -16,30 +16,29 @@
//
// Example:
//
-// package main
-//
-// import (
-// "fmt"
-// "log"
-//
-// "github.com/prometheus/procfs"
-// )
-//
-// func main() {
-// p, err := procfs.Self()
-// if err != nil {
-// log.Fatalf("could not get process: %s", err)
-// }
-//
-// stat, err := p.Stat()
-// if err != nil {
-// log.Fatalf("could not get process stat: %s", err)
-// }
-//
-// fmt.Printf("command: %s\n", stat.Comm)
-// fmt.Printf("cpu time: %fs\n", stat.CPUTime())
-// fmt.Printf("vsize: %dB\n", stat.VirtualMemory())
-// fmt.Printf("rss: %dB\n", stat.ResidentMemory())
-// }
-//
+// package main
+//
+// import (
+// "fmt"
+// "log"
+//
+// "github.com/prometheus/procfs"
+// )
+//
+// func main() {
+// p, err := procfs.Self()
+// if err != nil {
+// log.Fatalf("could not get process: %s", err)
+// }
+//
+// stat, err := p.Stat()
+// if err != nil {
+// log.Fatalf("could not get process stat: %s", err)
+// }
+//
+// fmt.Printf("command: %s\n", stat.Comm)
+// fmt.Printf("cpu time: %fs\n", stat.CPUTime())
+// fmt.Printf("vsize: %dB\n", stat.VirtualMemory())
+// fmt.Printf("rss: %dB\n", stat.ResidentMemory())
+// }
package procfs
diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go
index 0102ab0fd85..4980c875bfc 100644
--- a/vendor/github.com/prometheus/procfs/fs.go
+++ b/vendor/github.com/prometheus/procfs/fs.go
@@ -20,7 +20,8 @@ import (
// FS represents the pseudo-filesystem sys, which provides an interface to
// kernel data structures.
type FS struct {
- proc fs.FS
+ proc fs.FS
+ isReal bool
}
// DefaultMountPoint is the common mount point of the proc filesystem.
@@ -39,5 +40,11 @@ func NewFS(mountPoint string) (FS, error) {
if err != nil {
return FS{}, err
}
- return FS{fs}, nil
+
+ isReal, err := isRealProc(mountPoint)
+ if err != nil {
+ return FS{}, err
+ }
+
+ return FS{fs, isReal}, nil
}
diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_notype.go b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go
new file mode 100644
index 00000000000..134767d69ac
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go
@@ -0,0 +1,23 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !freebsd && !linux
+// +build !freebsd,!linux
+
+package procfs
+
+// isRealProc returns true on architectures that don't have a Type argument
+// in their Statfs_t struct.
+func isRealProc(mountPoint string) (bool, error) {
+ return true, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_type.go b/vendor/github.com/prometheus/procfs/fs_statfs_type.go
new file mode 100644
index 00000000000..80df79c3193
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fs_statfs_type.go
@@ -0,0 +1,33 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build freebsd || linux
+// +build freebsd linux
+
+package procfs
+
+import (
+ "syscall"
+)
+
+// isRealProc determines whether supplied mountpoint is really a proc filesystem.
+func isRealProc(mountPoint string) (bool, error) {
+ stat := syscall.Statfs_t{}
+ err := syscall.Statfs(mountPoint, &stat)
+ if err != nil {
+ return false, err
+ }
+
+ // 0x9fa0 is PROC_SUPER_MAGIC: https://elixir.bootlin.com/linux/v6.1/source/include/uapi/linux/magic.h#L87
+ return stat.Type == 0x9fa0, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/fscache.go b/vendor/github.com/prometheus/procfs/fscache.go
index f8070e6e2be..cf2e3eaa03c 100644
--- a/vendor/github.com/prometheus/procfs/fscache.go
+++ b/vendor/github.com/prometheus/procfs/fscache.go
@@ -236,7 +236,7 @@ func (fs FS) Fscacheinfo() (Fscacheinfo, error) {
m, err := parseFscacheinfo(bytes.NewReader(b))
if err != nil {
- return Fscacheinfo{}, fmt.Errorf("failed to parse Fscacheinfo: %w", err)
+ return Fscacheinfo{}, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, m, err)
}
return *m, nil
@@ -245,7 +245,7 @@ func (fs FS) Fscacheinfo() (Fscacheinfo, error) {
func setFSCacheFields(fields []string, setFields ...*uint64) error {
var err error
if len(fields) < len(setFields) {
- return fmt.Errorf("Insufficient number of fields, expected %v, got %v", len(setFields), len(fields))
+ return fmt.Errorf("%w: Expected %d, but got %d: %w", ErrFileParse, len(setFields), len(fields), err)
}
for i := range setFields {
@@ -263,7 +263,7 @@ func parseFscacheinfo(r io.Reader) (*Fscacheinfo, error) {
for s.Scan() {
fields := strings.Fields(s.Text())
if len(fields) < 2 {
- return nil, fmt.Errorf("malformed Fscacheinfo line: %q", s.Text())
+ return nil, fmt.Errorf("%w: malformed Fscacheinfo line: %q", ErrFileParse, s.Text())
}
switch fields[0] {
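
The recurring change in these hunks is wrapping the ErrFileParse sentinel via %w, so callers can test the error class with errors.Is instead of matching message text. A small sketch of the pattern, with a local stand-in for the sentinel:

    package main

    import (
        "errors"
        "fmt"
    )

    // errFileParse stands in for procfs.ErrFileParse.
    var errFileParse = errors.New("file parse error")

    func parse(line string) error {
        // Same wrapping shape as the messages above.
        return fmt.Errorf("%w: malformed line: %q", errFileParse, line)
    }

    func main() {
        err := parse("bogus input")
        fmt.Println(errors.Is(err, errFileParse)) // true: %w chains unwrap
    }
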
diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go
index b030951faf9..14272dc7885 100644
--- a/vendor/github.com/prometheus/procfs/internal/util/parse.go
+++ b/vendor/github.com/prometheus/procfs/internal/util/parse.go
@@ -64,6 +64,21 @@ func ParsePInt64s(ss []string) ([]*int64, error) {
return us, nil
}
+// ParseHexUint64s parses a slice of hex-encoded strings into *uint64 values.
+func ParseHexUint64s(ss []string) ([]*uint64, error) {
+ us := make([]*uint64, 0, len(ss))
+ for _, s := range ss {
+ u, err := strconv.ParseUint(s, 16, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ us = append(us, &u)
+ }
+
+ return us, nil
+}
+
// ReadUintFromFile reads a file and attempts to parse a uint64 from it.
func ReadUintFromFile(path string) (uint64, error) {
data, err := os.ReadFile(path)
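
A self-contained sketch of what the new helper does (reimplemented locally here, since internal/util cannot be imported from outside the module):

    package main

    import (
        "fmt"
        "strconv"
    )

    // parseHexUint64s mirrors util.ParseHexUint64s: every input string is base-16.
    func parseHexUint64s(ss []string) ([]*uint64, error) {
        us := make([]*uint64, 0, len(ss))
        for _, s := range ss {
            u, err := strconv.ParseUint(s, 16, 64)
            if err != nil {
                return nil, err
            }
            us = append(us, &u)
        }
        return us, nil
    }

    func main() {
        vals, err := parseHexUint64s([]string{"0000000f", "1a"})
        if err != nil {
            panic(err)
        }
        fmt.Println(*vals[0], *vals[1]) // 15 26
    }
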
diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go
index 391c07957e9..bc3a20c932d 100644
--- a/vendor/github.com/prometheus/procfs/ipvs.go
+++ b/vendor/github.com/prometheus/procfs/ipvs.go
@@ -221,15 +221,16 @@ func parseIPPort(s string) (net.IP, uint16, error) {
case 46:
ip = net.ParseIP(s[1:40])
if ip == nil {
- return nil, 0, fmt.Errorf("invalid IPv6 address: %s", s[1:40])
+ return nil, 0, fmt.Errorf("%w: Invalid IPv6 addr %s: %w", ErrFileParse, s[1:40], err)
}
default:
- return nil, 0, fmt.Errorf("unexpected IP:Port: %s", s)
+ return nil, 0, fmt.Errorf("%w: Unexpected IP:Port %s: %w", ErrFileParse, s, err)
}
portString := s[len(s)-4:]
if len(portString) != 4 {
- return nil, 0, fmt.Errorf("unexpected port string format: %s", portString)
+ return nil, 0,
+ fmt.Errorf("%w: Unexpected port string format %s: %w", ErrFileParse, portString, err)
}
port, err := strconv.ParseUint(portString, 16, 16)
if err != nil {
diff --git a/vendor/github.com/prometheus/procfs/loadavg.go b/vendor/github.com/prometheus/procfs/loadavg.go
index 0096cafbdf8..332e76c17f5 100644
--- a/vendor/github.com/prometheus/procfs/loadavg.go
+++ b/vendor/github.com/prometheus/procfs/loadavg.go
@@ -44,14 +44,14 @@ func parseLoad(loadavgBytes []byte) (*LoadAvg, error) {
loads := make([]float64, 3)
parts := strings.Fields(string(loadavgBytes))
if len(parts) < 3 {
- return nil, fmt.Errorf("malformed loadavg line: too few fields in loadavg string: %q", string(loadavgBytes))
+ return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, string(loadavgBytes))
}
var err error
for i, load := range parts[0:3] {
loads[i], err = strconv.ParseFloat(load, 64)
if err != nil {
- return nil, fmt.Errorf("could not parse load %q: %w", load, err)
+ return nil, fmt.Errorf("%w: Cannot parse load: %f: %w", ErrFileParse, loads[i], err)
}
}
return &LoadAvg{
diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go
index a95c889cb9e..67a9d2b4486 100644
--- a/vendor/github.com/prometheus/procfs/mdstat.go
+++ b/vendor/github.com/prometheus/procfs/mdstat.go
@@ -23,7 +23,7 @@ import (
var (
statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[([U_]+)\]`)
- recoveryLineBlocksRE = regexp.MustCompile(`\((\d+)/\d+\)`)
+ recoveryLineBlocksRE = regexp.MustCompile(`\((\d+/\d+)\)`)
recoveryLinePctRE = regexp.MustCompile(`= (.+)%`)
recoveryLineFinishRE = regexp.MustCompile(`finish=(.+)min`)
recoveryLineSpeedRE = regexp.MustCompile(`speed=(.+)[A-Z]`)
@@ -50,6 +50,8 @@ type MDStat struct {
BlocksTotal int64
// Number of blocks on the device that are in sync.
BlocksSynced int64
+ // Number of blocks on the device that need to be synced.
+ BlocksToBeSynced int64
// progress percentage of current sync
BlocksSyncedPct float64
// estimated finishing time for current sync (in minutes)
@@ -70,7 +72,7 @@ func (fs FS) MDStat() ([]MDStat, error) {
}
mdstat, err := parseMDStat(data)
if err != nil {
- return nil, fmt.Errorf("error parsing mdstat %q: %w", fs.proc.Path("mdstat"), err)
+ return nil, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, fs.proc.Path("mdstat"), err)
}
return mdstat, nil
}
@@ -90,13 +92,13 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
deviceFields := strings.Fields(line)
if len(deviceFields) < 3 {
- return nil, fmt.Errorf("not enough fields in mdline (expected at least 3): %s", line)
+ return nil, fmt.Errorf("%w: Expected 3+ lines, got %q", ErrFileParse, line)
}
mdName := deviceFields[0] // mdx
state := deviceFields[2] // active or inactive
if len(lines) <= i+3 {
- return nil, fmt.Errorf("error parsing %q: too few lines for md device", mdName)
+ return nil, fmt.Errorf("%w: Too few lines for md device: %q", ErrFileParse, mdName)
}
// Failed disks have the suffix (F) & Spare disks have the suffix (S).
@@ -105,7 +107,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
active, total, down, size, err := evalStatusLine(lines[i], lines[i+1])
if err != nil {
- return nil, fmt.Errorf("error parsing md device lines: %w", err)
+ return nil, fmt.Errorf("%w: Cannot parse md device lines: %v: %w", ErrFileParse, active, err)
}
syncLineIdx := i + 2
@@ -115,7 +117,8 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
// If device is syncing at the moment, get the number of currently
// synced bytes, otherwise that number equals the size of the device.
- syncedBlocks := size
+ blocksSynced := size
+ blocksToBeSynced := size
speed := float64(0)
finish := float64(0)
pct := float64(0)
@@ -136,11 +139,11 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
// Handle case when resync=PENDING or resync=DELAYED.
if strings.Contains(lines[syncLineIdx], "PENDING") ||
strings.Contains(lines[syncLineIdx], "DELAYED") {
- syncedBlocks = 0
+ blocksSynced = 0
} else {
- syncedBlocks, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx])
+ blocksSynced, blocksToBeSynced, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx])
if err != nil {
- return nil, fmt.Errorf("error parsing sync line in md device %q: %w", mdName, err)
+ return nil, fmt.Errorf("%w: Cannot parse sync line in md device: %q: %w", ErrFileParse, mdName, err)
}
}
}
@@ -154,7 +157,8 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
DisksSpare: spare,
DisksTotal: total,
BlocksTotal: size,
- BlocksSynced: syncedBlocks,
+ BlocksSynced: blocksSynced,
+ BlocksToBeSynced: blocksToBeSynced,
BlocksSyncedPct: pct,
BlocksSyncedFinishTime: finish,
BlocksSyncedSpeed: speed,
@@ -168,13 +172,13 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
func evalStatusLine(deviceLine, statusLine string) (active, total, down, size int64, err error) {
statusFields := strings.Fields(statusLine)
if len(statusFields) < 1 {
- return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q", statusLine)
+ return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
}
sizeStr := statusFields[0]
size, err = strconv.ParseInt(sizeStr, 10, 64)
if err != nil {
- return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
+ return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
}
if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") {
@@ -189,65 +193,71 @@ func evalStatusLine(deviceLine, statusLine string) (active, total, down, size in
matches := statusLineRE.FindStringSubmatch(statusLine)
if len(matches) != 5 {
- return 0, 0, 0, 0, fmt.Errorf("couldn't find all the substring matches: %s", statusLine)
+ return 0, 0, 0, 0, fmt.Errorf("%w: Could not fild all substring matches %s: %w", ErrFileParse, statusLine, err)
}
total, err = strconv.ParseInt(matches[2], 10, 64)
if err != nil {
- return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
+ return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
}
active, err = strconv.ParseInt(matches[3], 10, 64)
if err != nil {
- return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
+ return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected active %d: %w", ErrFileParse, active, err)
}
down = int64(strings.Count(matches[4], "_"))
return active, total, down, size, nil
}
-func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, pct float64, finish float64, speed float64, err error) {
+func evalRecoveryLine(recoveryLine string) (blocksSynced int64, blocksToBeSynced int64, pct float64, finish float64, speed float64, err error) {
matches := recoveryLineBlocksRE.FindStringSubmatch(recoveryLine)
if len(matches) != 2 {
- return 0, 0, 0, 0, fmt.Errorf("unexpected recoveryLine: %s", recoveryLine)
+ return 0, 0, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine blocks %s: %w", ErrFileParse, recoveryLine, err)
}
- syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64)
+ blocks := strings.Split(matches[1], "/")
+ blocksSynced, err = strconv.ParseInt(blocks[0], 10, 64)
if err != nil {
- return 0, 0, 0, 0, fmt.Errorf("error parsing int from recoveryLine %q: %w", recoveryLine, err)
+ return 0, 0, 0, 0, 0, fmt.Errorf("%w: Unable to parse recovery blocks synced %q: %w", ErrFileParse, matches[1], err)
+ }
+
+ blocksToBeSynced, err = strconv.ParseInt(blocks[1], 10, 64)
+ if err != nil {
+ return blocksSynced, 0, 0, 0, 0, fmt.Errorf("%w: Unable to parse recovery blocks to be synced %q: %w", ErrFileParse, blocks[1], err)
}
// Get percentage complete
matches = recoveryLinePctRE.FindStringSubmatch(recoveryLine)
if len(matches) != 2 {
- return syncedBlocks, 0, 0, 0, fmt.Errorf("unexpected recoveryLine matching percentage: %s", recoveryLine)
+ return blocksSynced, blocksToBeSynced, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching percentage %s", ErrFileParse, recoveryLine)
}
pct, err = strconv.ParseFloat(strings.TrimSpace(matches[1]), 64)
if err != nil {
- return syncedBlocks, 0, 0, 0, fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLine, err)
+ return blocksSynced, blocksToBeSynced, 0, 0, 0, fmt.Errorf("%w: Error parsing float from recoveryLine %q", ErrFileParse, recoveryLine)
}
// Get time expected left to complete
matches = recoveryLineFinishRE.FindStringSubmatch(recoveryLine)
if len(matches) != 2 {
- return syncedBlocks, pct, 0, 0, fmt.Errorf("unexpected recoveryLine matching est. finish time: %s", recoveryLine)
+ return blocksSynced, blocksToBeSynced, pct, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching est. finish time: %s", ErrFileParse, recoveryLine)
}
finish, err = strconv.ParseFloat(matches[1], 64)
if err != nil {
- return syncedBlocks, pct, 0, 0, fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLine, err)
+ return blocksSynced, blocksToBeSynced, pct, 0, 0, fmt.Errorf("%w: Unable to parse float from recoveryLine: %q", ErrFileParse, recoveryLine)
}
// Get recovery speed
matches = recoveryLineSpeedRE.FindStringSubmatch(recoveryLine)
if len(matches) != 2 {
- return syncedBlocks, pct, finish, 0, fmt.Errorf("unexpected recoveryLine matching speed: %s", recoveryLine)
+ return blocksSynced, blocksToBeSynced, pct, finish, 0, fmt.Errorf("%w: Unexpected recoveryLine value: %s", ErrFileParse, recoveryLine)
}
speed, err = strconv.ParseFloat(matches[1], 64)
if err != nil {
- return syncedBlocks, pct, finish, 0, fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLine, err)
+ return blocksSynced, blocksToBeSynced, pct, finish, 0, fmt.Errorf("%w: Error parsing float from recoveryLine: %q: %w", ErrFileParse, recoveryLine, err)
}
- return syncedBlocks, pct, finish, speed, nil
+ return blocksSynced, blocksToBeSynced, pct, finish, speed, nil
}
func evalComponentDevices(deviceFields []string) []string {
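
The widened capture group in recoveryLineBlocksRE now keeps both numbers of the "(synced/total)" pair, which evalRecoveryLine splits on "/". A sketch against a representative (invented) /proc/mdstat recovery line:

    package main

    import (
        "fmt"
        "regexp"
        "strconv"
        "strings"
    )

    var recoveryLineBlocksRE = regexp.MustCompile(`\((\d+/\d+)\)`)

    func main() {
        line := "[===>....]  recovery = 12.6% (37043392/292945152) finish=127.5min speed=33440K/sec"
        m := recoveryLineBlocksRE.FindStringSubmatch(line)
        blocks := strings.Split(m[1], "/")
        synced, _ := strconv.ParseInt(blocks[0], 10, 64)
        toSync, _ := strconv.ParseInt(blocks[1], 10, 64)
        fmt.Println(synced, toSync) // 37043392 292945152
    }
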
diff --git a/vendor/github.com/prometheus/procfs/meminfo.go b/vendor/github.com/prometheus/procfs/meminfo.go
index f65e174e57b..4b2c4050a3d 100644
--- a/vendor/github.com/prometheus/procfs/meminfo.go
+++ b/vendor/github.com/prometheus/procfs/meminfo.go
@@ -126,6 +126,7 @@ type Meminfo struct {
VmallocUsed *uint64
// largest contiguous block of vmalloc area which is free
VmallocChunk *uint64
+ Percpu *uint64
HardwareCorrupted *uint64
AnonHugePages *uint64
ShmemHugePages *uint64
@@ -140,6 +141,55 @@ type Meminfo struct {
DirectMap4k *uint64
DirectMap2M *uint64
DirectMap1G *uint64
+
+ // The struct fields below are the byte-normalized counterparts to the
+ // existing struct fields. Values are normalized using the optional
+ // unit field in the meminfo line.
+ MemTotalBytes *uint64
+ MemFreeBytes *uint64
+ MemAvailableBytes *uint64
+ BuffersBytes *uint64
+ CachedBytes *uint64
+ SwapCachedBytes *uint64
+ ActiveBytes *uint64
+ InactiveBytes *uint64
+ ActiveAnonBytes *uint64
+ InactiveAnonBytes *uint64
+ ActiveFileBytes *uint64
+ InactiveFileBytes *uint64
+ UnevictableBytes *uint64
+ MlockedBytes *uint64
+ SwapTotalBytes *uint64
+ SwapFreeBytes *uint64
+ DirtyBytes *uint64
+ WritebackBytes *uint64
+ AnonPagesBytes *uint64
+ MappedBytes *uint64
+ ShmemBytes *uint64
+ SlabBytes *uint64
+ SReclaimableBytes *uint64
+ SUnreclaimBytes *uint64
+ KernelStackBytes *uint64
+ PageTablesBytes *uint64
+ NFSUnstableBytes *uint64
+ BounceBytes *uint64
+ WritebackTmpBytes *uint64
+ CommitLimitBytes *uint64
+ CommittedASBytes *uint64
+ VmallocTotalBytes *uint64
+ VmallocUsedBytes *uint64
+ VmallocChunkBytes *uint64
+ PercpuBytes *uint64
+ HardwareCorruptedBytes *uint64
+ AnonHugePagesBytes *uint64
+ ShmemHugePagesBytes *uint64
+ ShmemPmdMappedBytes *uint64
+ CmaTotalBytes *uint64
+ CmaFreeBytes *uint64
+ HugepagesizeBytes *uint64
+ DirectMap4kBytes *uint64
+ DirectMap2MBytes *uint64
+ DirectMap1GBytes *uint64
}
// Meminfo returns information about current kernel/system memory statistics.
@@ -152,7 +202,7 @@ func (fs FS) Meminfo() (Meminfo, error) {
m, err := parseMemInfo(bytes.NewReader(b))
if err != nil {
- return Meminfo{}, fmt.Errorf("failed to parse meminfo: %w", err)
+ return Meminfo{}, fmt.Errorf("%w: %w", ErrFileParse, err)
}
return *m, nil
@@ -162,114 +212,176 @@ func parseMemInfo(r io.Reader) (*Meminfo, error) {
var m Meminfo
s := bufio.NewScanner(r)
for s.Scan() {
- // Each line has at least a name and value; we ignore the unit.
fields := strings.Fields(s.Text())
- if len(fields) < 2 {
- return nil, fmt.Errorf("malformed meminfo line: %q", s.Text())
- }
+ var val, valBytes uint64
- v, err := strconv.ParseUint(fields[1], 0, 64)
+ val, err := strconv.ParseUint(fields[1], 0, 64)
if err != nil {
return nil, err
}
+ switch len(fields) {
+ case 2:
+ // No unit present; use the parsed value as bytes directly.
+ valBytes = val
+ case 3:
+ // Unit present in optional 3rd field, convert it to
+ // bytes. The only unit supported within the Linux
+ // kernel is `kB`.
+ if fields[2] != "kB" {
+ return nil, fmt.Errorf("%w: Unsupported unit in optional 3rd field %q", ErrFileParse, fields[2])
+ }
+
+ valBytes = 1024 * val
+
+ default:
+ return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, s.Text())
+ }
+
switch fields[0] {
case "MemTotal:":
- m.MemTotal = &v
+ m.MemTotal = &val
+ m.MemTotalBytes = &valBytes
case "MemFree:":
- m.MemFree = &v
+ m.MemFree = &val
+ m.MemFreeBytes = &valBytes
case "MemAvailable:":
- m.MemAvailable = &v
+ m.MemAvailable = &val
+ m.MemAvailableBytes = &valBytes
case "Buffers:":
- m.Buffers = &v
+ m.Buffers = &val
+ m.BuffersBytes = &valBytes
case "Cached:":
- m.Cached = &v
+ m.Cached = &val
+ m.CachedBytes = &valBytes
case "SwapCached:":
- m.SwapCached = &v
+ m.SwapCached = &val
+ m.SwapCachedBytes = &valBytes
case "Active:":
- m.Active = &v
+ m.Active = &val
+ m.ActiveBytes = &valBytes
case "Inactive:":
- m.Inactive = &v
+ m.Inactive = &val
+ m.InactiveBytes = &valBytes
case "Active(anon):":
- m.ActiveAnon = &v
+ m.ActiveAnon = &val
+ m.ActiveAnonBytes = &valBytes
case "Inactive(anon):":
- m.InactiveAnon = &v
+ m.InactiveAnon = &val
+ m.InactiveAnonBytes = &valBytes
case "Active(file):":
- m.ActiveFile = &v
+ m.ActiveFile = &val
+ m.ActiveFileBytes = &valBytes
case "Inactive(file):":
- m.InactiveFile = &v
+ m.InactiveFile = &val
+ m.InactiveFileBytes = &valBytes
case "Unevictable:":
- m.Unevictable = &v
+ m.Unevictable = &val
+ m.UnevictableBytes = &valBytes
case "Mlocked:":
- m.Mlocked = &v
+ m.Mlocked = &val
+ m.MlockedBytes = &valBytes
case "SwapTotal:":
- m.SwapTotal = &v
+ m.SwapTotal = &val
+ m.SwapTotalBytes = &valBytes
case "SwapFree:":
- m.SwapFree = &v
+ m.SwapFree = &val
+ m.SwapFreeBytes = &valBytes
case "Dirty:":
- m.Dirty = &v
+ m.Dirty = &val
+ m.DirtyBytes = &valBytes
case "Writeback:":
- m.Writeback = &v
+ m.Writeback = &val
+ m.WritebackBytes = &valBytes
case "AnonPages:":
- m.AnonPages = &v
+ m.AnonPages = &val
+ m.AnonPagesBytes = &valBytes
case "Mapped:":
- m.Mapped = &v
+ m.Mapped = &val
+ m.MappedBytes = &valBytes
case "Shmem:":
- m.Shmem = &v
+ m.Shmem = &val
+ m.ShmemBytes = &valBytes
case "Slab:":
- m.Slab = &v
+ m.Slab = &val
+ m.SlabBytes = &valBytes
case "SReclaimable:":
- m.SReclaimable = &v
+ m.SReclaimable = &val
+ m.SReclaimableBytes = &valBytes
case "SUnreclaim:":
- m.SUnreclaim = &v
+ m.SUnreclaim = &val
+ m.SUnreclaimBytes = &valBytes
case "KernelStack:":
- m.KernelStack = &v
+ m.KernelStack = &val
+ m.KernelStackBytes = &valBytes
case "PageTables:":
- m.PageTables = &v
+ m.PageTables = &val
+ m.PageTablesBytes = &valBytes
case "NFS_Unstable:":
- m.NFSUnstable = &v
+ m.NFSUnstable = &val
+ m.NFSUnstableBytes = &valBytes
case "Bounce:":
- m.Bounce = &v
+ m.Bounce = &val
+ m.BounceBytes = &valBytes
case "WritebackTmp:":
- m.WritebackTmp = &v
+ m.WritebackTmp = &val
+ m.WritebackTmpBytes = &valBytes
case "CommitLimit:":
- m.CommitLimit = &v
+ m.CommitLimit = &val
+ m.CommitLimitBytes = &valBytes
case "Committed_AS:":
- m.CommittedAS = &v
+ m.CommittedAS = &val
+ m.CommittedASBytes = &valBytes
case "VmallocTotal:":
- m.VmallocTotal = &v
+ m.VmallocTotal = &val
+ m.VmallocTotalBytes = &valBytes
case "VmallocUsed:":
- m.VmallocUsed = &v
+ m.VmallocUsed = &val
+ m.VmallocUsedBytes = &valBytes
case "VmallocChunk:":
- m.VmallocChunk = &v
+ m.VmallocChunk = &val
+ m.VmallocChunkBytes = &valBytes
+ case "Percpu:":
+ m.Percpu = &val
+ m.PercpuBytes = &valBytes
case "HardwareCorrupted:":
- m.HardwareCorrupted = &v
+ m.HardwareCorrupted = &val
+ m.HardwareCorruptedBytes = &valBytes
case "AnonHugePages:":
- m.AnonHugePages = &v
+ m.AnonHugePages = &val
+ m.AnonHugePagesBytes = &valBytes
case "ShmemHugePages:":
- m.ShmemHugePages = &v
+ m.ShmemHugePages = &val
+ m.ShmemHugePagesBytes = &valBytes
case "ShmemPmdMapped:":
- m.ShmemPmdMapped = &v
+ m.ShmemPmdMapped = &val
+ m.ShmemPmdMappedBytes = &valBytes
case "CmaTotal:":
- m.CmaTotal = &v
+ m.CmaTotal = &val
+ m.CmaTotalBytes = &valBytes
case "CmaFree:":
- m.CmaFree = &v
+ m.CmaFree = &val
+ m.CmaFreeBytes = &valBytes
case "HugePages_Total:":
- m.HugePagesTotal = &v
+ m.HugePagesTotal = &val
case "HugePages_Free:":
- m.HugePagesFree = &v
+ m.HugePagesFree = &val
case "HugePages_Rsvd:":
- m.HugePagesRsvd = &v
+ m.HugePagesRsvd = &val
case "HugePages_Surp:":
- m.HugePagesSurp = &v
+ m.HugePagesSurp = &val
case "Hugepagesize:":
- m.Hugepagesize = &v
+ m.Hugepagesize = &val
+ m.HugepagesizeBytes = &valBytes
case "DirectMap4k:":
- m.DirectMap4k = &v
+ m.DirectMap4k = &val
+ m.DirectMap4kBytes = &valBytes
case "DirectMap2M:":
- m.DirectMap2M = &v
+ m.DirectMap2M = &val
+ m.DirectMap2MBytes = &valBytes
case "DirectMap1G:":
- m.DirectMap1G = &v
+ m.DirectMap1G = &val
+ m.DirectMap1GBytes = &valBytes
}
}
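
The byte normalization above hinges on the optional third field: two fields mean the value is already a plain count, three fields must name the kB unit. A sketch with sample lines (values invented for illustration):

    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    func normalize(line string) (val, valBytes uint64, err error) {
        fields := strings.Fields(line)
        val, err = strconv.ParseUint(fields[1], 0, 64)
        if err != nil {
            return 0, 0, err
        }
        switch len(fields) {
        case 2: // no unit: use the value as-is
            return val, val, nil
        case 3: // "kB" is the only unit the kernel emits
            return val, val * 1024, nil
        default:
            return 0, 0, fmt.Errorf("malformed line: %q", line)
        }
    }

    func main() {
        v, b, _ := normalize("MemTotal:       16384 kB")
        fmt.Println(v, b) // 16384 16777216
        v, b, _ = normalize("HugePages_Total:     8")
        fmt.Println(v, b) // 8 8
    }
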
diff --git a/vendor/github.com/prometheus/procfs/mountinfo.go b/vendor/github.com/prometheus/procfs/mountinfo.go
index 59f4d505583..a704c5e735f 100644
--- a/vendor/github.com/prometheus/procfs/mountinfo.go
+++ b/vendor/github.com/prometheus/procfs/mountinfo.go
@@ -78,11 +78,11 @@ func parseMountInfoString(mountString string) (*MountInfo, error) {
mountInfo := strings.Split(mountString, " ")
mountInfoLength := len(mountInfo)
if mountInfoLength < 10 {
- return nil, fmt.Errorf("couldn't find enough fields in mount string: %s", mountString)
+ return nil, fmt.Errorf("%w: Too few fields in mount string: %s", ErrFileParse, mountString)
}
if mountInfo[mountInfoLength-4] != "-" {
- return nil, fmt.Errorf("couldn't find separator in expected field: %s", mountInfo[mountInfoLength-4])
+ return nil, fmt.Errorf("%w: couldn't find separator in expected field: %s", ErrFileParse, mountInfo[mountInfoLength-4])
}
mount := &MountInfo{
@@ -98,18 +98,18 @@ func parseMountInfoString(mountString string) (*MountInfo, error) {
mount.MountID, err = strconv.Atoi(mountInfo[0])
if err != nil {
- return nil, fmt.Errorf("failed to parse mount ID")
+ return nil, fmt.Errorf("%w: mount ID: %q", ErrFileParse, mount.MountID)
}
mount.ParentID, err = strconv.Atoi(mountInfo[1])
if err != nil {
- return nil, fmt.Errorf("failed to parse parent ID")
+ return nil, fmt.Errorf("%w: parent ID: %q", ErrFileParse, mount.ParentID)
}
// Has optional fields, which is a space separated list of values.
// Example: shared:2 master:7
if mountInfo[6] != "" {
mount.OptionalFields, err = mountOptionsParseOptionalFields(mountInfo[6 : mountInfoLength-4])
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("%w: %w", ErrFileParse, err)
}
}
return mount, nil
diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go
index f7a828bb1da..75a3b6c810f 100644
--- a/vendor/github.com/prometheus/procfs/mountstats.go
+++ b/vendor/github.com/prometheus/procfs/mountstats.go
@@ -44,6 +44,14 @@ const (
fieldTransport11TCPLen = 13
fieldTransport11UDPLen = 10
+
+ // kernel version >= 4.14 MaxLen
+ // See: https://elixir.bootlin.com/linux/v6.4.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L393
+ fieldTransport11RDMAMaxLen = 28
+
+ // kernel version <= 4.2 MinLen
+ // See: https://elixir.bootlin.com/linux/v4.2.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L331
+ fieldTransport11RDMAMinLen = 20
)
// A Mount is a device mount parsed from /proc/[pid]/mountstats.
@@ -80,7 +88,7 @@ type MountStatsNFS struct {
// Statistics broken down by filesystem operation.
Operations []NFSOperationStats
// Statistics about the NFS RPC transport.
- Transport NFSTransportStats
+ Transport []NFSTransportStats
}
// mountStats implements MountStats.
@@ -231,6 +239,33 @@ type NFSTransportStats struct {
// A running counter, incremented on each request as the current size of the
// pending queue.
CumulativePendingQueue uint64
+
+ // Stats below only available with stat version 1.1.
+ // Transport over RDMA
+
+ // accessed when sending a call
+ ReadChunkCount uint64
+ WriteChunkCount uint64
+ ReplyChunkCount uint64
+ TotalRdmaRequest uint64
+
+ // rarely accessed error counters
+ PullupCopyCount uint64
+ HardwayRegisterCount uint64
+ FailedMarshalCount uint64
+ BadReplyCount uint64
+ MrsRecovered uint64
+ MrsOrphaned uint64
+ MrsAllocated uint64
+ EmptySendctxQ uint64
+
+ // accessed when receiving a reply
+ TotalRdmaReply uint64
+ FixupCopyCount uint64
+ ReplyWaitsForSend uint64
+ LocalInvNeeded uint64
+ NomsgCallCount uint64
+ BcallCount uint64
}
// parseMountStats parses a /proc/[pid]/mountstats file and returns a slice
@@ -264,7 +299,7 @@ func parseMountStats(r io.Reader) ([]*Mount, error) {
if len(ss) > deviceEntryLen {
// Only NFSv3 and v4 are supported for parsing statistics
if m.Type != nfs3Type && m.Type != nfs4Type {
- return nil, fmt.Errorf("cannot parse MountStats for fstype %q", m.Type)
+ return nil, fmt.Errorf("%w: Cannot parse MountStats for %q", ErrFileParse, m.Type)
}
statVersion := strings.TrimPrefix(ss[8], statVersionPrefix)
@@ -284,10 +319,11 @@ func parseMountStats(r io.Reader) ([]*Mount, error) {
}
// parseMount parses an entry in /proc/[pid]/mountstats in the format:
-// device [device] mounted on [mount] with fstype [type]
+//
+// device [device] mounted on [mount] with fstype [type]
func parseMount(ss []string) (*Mount, error) {
if len(ss) < deviceEntryLen {
- return nil, fmt.Errorf("invalid device entry: %v", ss)
+ return nil, fmt.Errorf("%w: Invalid device %q", ErrFileParse, ss)
}
// Check for specific words appearing at specific indices to ensure
@@ -305,7 +341,7 @@ func parseMount(ss []string) (*Mount, error) {
for _, f := range format {
if ss[f.i] != f.s {
- return nil, fmt.Errorf("invalid device entry: %v", ss)
+ return nil, fmt.Errorf("%w: Invalid device %q", ErrFileParse, ss)
}
}
@@ -342,7 +378,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
switch ss[0] {
case fieldOpts:
if len(ss) < 2 {
- return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
+ return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss)
}
if stats.Opts == nil {
stats.Opts = map[string]string{}
@@ -357,7 +393,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
}
case fieldAge:
if len(ss) < 2 {
- return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
+ return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss)
}
// Age integer is in seconds
d, err := time.ParseDuration(ss[1] + "s")
@@ -368,7 +404,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
stats.Age = d
case fieldBytes:
if len(ss) < 2 {
- return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
+ return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss)
}
bstats, err := parseNFSBytesStats(ss[1:])
if err != nil {
@@ -378,7 +414,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
stats.Bytes = *bstats
case fieldEvents:
if len(ss) < 2 {
- return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
+ return nil, fmt.Errorf("%w: Incomplete information for NFS events: %v", ErrFileParse, ss)
}
estats, err := parseNFSEventsStats(ss[1:])
if err != nil {
@@ -388,7 +424,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
stats.Events = *estats
case fieldTransport:
if len(ss) < 3 {
- return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss)
+ return nil, fmt.Errorf("%w: Incomplete information for NFS transport stats: %v", ErrFileParse, ss)
}
tstats, err := parseNFSTransportStats(ss[1:], statVersion)
@@ -396,7 +432,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
return nil, err
}
- stats.Transport = *tstats
+ stats.Transport = append(stats.Transport, *tstats)
}
// When encountering "per-operation statistics", we must break this
@@ -427,7 +463,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
// integer fields.
func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) {
if len(ss) != fieldBytesLen {
- return nil, fmt.Errorf("invalid NFS bytes stats: %v", ss)
+ return nil, fmt.Errorf("%w: Invalid NFS bytes stats: %v", ErrFileParse, ss)
}
ns := make([]uint64, 0, fieldBytesLen)
@@ -456,7 +492,7 @@ func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) {
// integer fields.
func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) {
if len(ss) != fieldEventsLen {
- return nil, fmt.Errorf("invalid NFS events stats: %v", ss)
+ return nil, fmt.Errorf("%w: invalid NFS events stats: %v", ErrFileParse, ss)
}
ns := make([]uint64, 0, fieldEventsLen)
@@ -520,7 +556,7 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
}
if len(ss) < minFields {
- return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss)
+ return nil, fmt.Errorf("%w: invalid NFS per-operations stats: %v", ErrFileParse, ss)
}
// Skip string operation name for integers
@@ -533,7 +569,6 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
ns = append(ns, n)
}
-
opStats := NFSOperationStats{
Operation: strings.TrimSuffix(ss[0], ":"),
Requests: ns[0],
@@ -571,10 +606,10 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
} else if protocol == "udp" {
expectedLength = fieldTransport10UDPLen
} else {
- return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.0 statement: %v", protocol, ss)
+ return nil, fmt.Errorf("%w: Invalid NFS protocol \"%s\" in stats 1.0 statement: %v", ErrFileParse, protocol, ss)
}
if len(ss) != expectedLength {
- return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss)
+ return nil, fmt.Errorf("%w: Invalid NFS transport stats 1.0 statement: %v", ErrFileParse, ss)
}
case statVersion11:
var expectedLength int
@@ -582,14 +617,17 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
expectedLength = fieldTransport11TCPLen
} else if protocol == "udp" {
expectedLength = fieldTransport11UDPLen
+ } else if protocol == "rdma" {
+ expectedLength = fieldTransport11RDMAMinLen
} else {
- return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.1 statement: %v", protocol, ss)
+ return nil, fmt.Errorf("%w: invalid NFS protocol \"%s\" in stats 1.1 statement: %v", ErrFileParse, protocol, ss)
}
- if len(ss) != expectedLength {
- return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss)
+ if (len(ss) != expectedLength && (protocol == "tcp" || protocol == "udp")) ||
+ (protocol == "rdma" && len(ss) < expectedLength) {
+ return nil, fmt.Errorf("%w: invalid NFS transport stats 1.1 statement: %v, protocol: %v", ErrFileParse, ss, protocol)
}
default:
- return nil, fmt.Errorf("unrecognized NFS transport stats version: %q", statVersion)
+ return nil, fmt.Errorf("%w: Unrecognized NFS transport stats version: %q, protocol: %v", ErrFileParse, statVersion, protocol)
}
// Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay
@@ -599,7 +637,9 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
// Note: slice length must be set to length of v1.1 stats to avoid a panic when
// only v1.0 stats are present.
// See: https://github.com/prometheus/node_exporter/issues/571.
- ns := make([]uint64, fieldTransport11TCPLen)
+ //
+ // Note: NFS Over RDMA slice length is fieldTransport11RDMAMaxLen
+ ns := make([]uint64, fieldTransport11RDMAMaxLen+3)
for i, s := range ss {
n, err := strconv.ParseUint(s, 10, 64)
if err != nil {
@@ -617,9 +657,14 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
// we set them to 0 here.
if protocol == "udp" {
ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...)
+ } else if protocol == "tcp" {
+ ns = append(ns[:fieldTransport11TCPLen], make([]uint64, fieldTransport11RDMAMaxLen-fieldTransport11TCPLen+3)...)
+ } else if protocol == "rdma" {
+ ns = append(ns[:fieldTransport10TCPLen], append(make([]uint64, 3), ns[fieldTransport10TCPLen:]...)...)
}
return &NFSTransportStats{
+ // NFS xprt over tcp or udp
Protocol: protocol,
Port: ns[0],
Bind: ns[1],
@@ -631,8 +676,32 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
BadTransactionIDs: ns[7],
CumulativeActiveRequests: ns[8],
CumulativeBacklog: ns[9],
- MaximumRPCSlotsUsed: ns[10],
- CumulativeSendingQueue: ns[11],
- CumulativePendingQueue: ns[12],
+
+ // NFS xprt over tcp or udp
+ // And statVersion 1.1
+ MaximumRPCSlotsUsed: ns[10],
+ CumulativeSendingQueue: ns[11],
+ CumulativePendingQueue: ns[12],
+
+ // NFS xprt over rdma
+ // And stat Version 1.1
+ ReadChunkCount: ns[13],
+ WriteChunkCount: ns[14],
+ ReplyChunkCount: ns[15],
+ TotalRdmaRequest: ns[16],
+ PullupCopyCount: ns[17],
+ HardwayRegisterCount: ns[18],
+ FailedMarshalCount: ns[19],
+ BadReplyCount: ns[20],
+ MrsRecovered: ns[21],
+ MrsOrphaned: ns[22],
+ MrsAllocated: ns[23],
+ EmptySendctxQ: ns[24],
+ TotalRdmaReply: ns[25],
+ FixupCopyCount: ns[26],
+ ReplyWaitsForSend: ns[27],
+ LocalInvNeeded: ns[28],
+ NomsgCallCount: ns[29],
+ BcallCount: ns[30],
}, nil
}
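
Since Transport changed from a struct to a slice (one entry per xprt line in the mountstats block), callers now iterate it. A hedged usage sketch against the post-change API, reading the current process's mounts:

    package main

    import (
        "fmt"
        "os"

        "github.com/prometheus/procfs"
    )

    func main() {
        p, err := procfs.Self() // /proc/self/mountstats
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            return
        }
        mounts, err := p.MountStats()
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            return
        }
        for _, m := range mounts {
            nfs, ok := m.Stats.(*procfs.MountStatsNFS)
            if !ok {
                continue // only NFS mounts carry transport stats
            }
            for _, t := range nfs.Transport {
                fmt.Println(m.Mount, t.Protocol, t.Sends)
            }
        }
    }
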
diff --git a/vendor/github.com/prometheus/procfs/net_conntrackstat.go b/vendor/github.com/prometheus/procfs/net_conntrackstat.go
index 8300daca054..316df5fbb74 100644
--- a/vendor/github.com/prometheus/procfs/net_conntrackstat.go
+++ b/vendor/github.com/prometheus/procfs/net_conntrackstat.go
@@ -18,7 +18,6 @@ import (
"bytes"
"fmt"
"io"
- "strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
@@ -28,9 +27,13 @@ import (
// and contains netfilter conntrack statistics at one CPU core.
type ConntrackStatEntry struct {
Entries uint64
+ Searched uint64
Found uint64
+ New uint64
Invalid uint64
Ignore uint64
+ Delete uint64
+ DeleteList uint64
Insert uint64
InsertFailed uint64
Drop uint64
@@ -55,7 +58,7 @@ func readConntrackStat(path string) ([]ConntrackStatEntry, error) {
stat, err := parseConntrackStat(bytes.NewReader(b))
if err != nil {
- return nil, fmt.Errorf("failed to read conntrack stats from %q: %w", path, err)
+ return nil, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, path, err)
}
return stat, nil
@@ -81,73 +84,35 @@ func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) {
// Parses a ConntrackStatEntry from given array of fields.
func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) {
- if len(fields) != 17 {
- return nil, fmt.Errorf("invalid conntrackstat entry, missing fields")
- }
- entry := &ConntrackStatEntry{}
-
- entries, err := parseConntrackStatField(fields[0])
- if err != nil {
- return nil, err
- }
- entry.Entries = entries
-
- found, err := parseConntrackStatField(fields[2])
- if err != nil {
- return nil, err
- }
- entry.Found = found
-
- invalid, err := parseConntrackStatField(fields[4])
- if err != nil {
- return nil, err
- }
- entry.Invalid = invalid
-
- ignore, err := parseConntrackStatField(fields[5])
- if err != nil {
- return nil, err
- }
- entry.Ignore = ignore
-
- insert, err := parseConntrackStatField(fields[8])
+ entries, err := util.ParseHexUint64s(fields)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("%w: Cannot parse entry: %d: %w", ErrFileParse, entries, err)
}
- entry.Insert = insert
-
- insertFailed, err := parseConntrackStatField(fields[9])
- if err != nil {
- return nil, err
+ numEntries := len(entries)
+ if numEntries < 16 || numEntries > 17 {
+ return nil,
+ fmt.Errorf("%w: invalid conntrackstat entry, invalid number of fields: %d", ErrFileParse, numEntries)
}
- entry.InsertFailed = insertFailed
- drop, err := parseConntrackStatField(fields[10])
- if err != nil {
- return nil, err
+ stats := &ConntrackStatEntry{
+ Entries: *entries[0],
+ Searched: *entries[1],
+ Found: *entries[2],
+ New: *entries[3],
+ Invalid: *entries[4],
+ Ignore: *entries[5],
+ Delete: *entries[6],
+ DeleteList: *entries[7],
+ Insert: *entries[8],
+ InsertFailed: *entries[9],
+ Drop: *entries[10],
+ EarlyDrop: *entries[11],
}
- entry.Drop = drop
- earlyDrop, err := parseConntrackStatField(fields[11])
- if err != nil {
- return nil, err
+ // Ignore missing search_restart on Linux < 2.6.35.
+ if numEntries == 17 {
+ stats.SearchRestart = *entries[16]
}
- entry.EarlyDrop = earlyDrop
- searchRestart, err := parseConntrackStatField(fields[16])
- if err != nil {
- return nil, err
- }
- entry.SearchRestart = searchRestart
-
- return entry, nil
-}
-
-// Parses a uint64 from given hex in string.
-func parseConntrackStatField(field string) (uint64, error) {
- val, err := strconv.ParseUint(field, 16, 64)
- if err != nil {
- return 0, fmt.Errorf("couldn't parse %q field: %w", field, err)
- }
- return val, err
+ return stats, nil
}
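
The rewritten parser treats every column of a /proc/net/stat/nf_conntrack row as hex and indexes into the result, tolerating 16 columns on kernels before 2.6.35 (which lack search_restart). A standalone sketch with an invented row:

    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    func main() {
        // 17 hex columns, as emitted by Linux >= 2.6.35.
        row := "000000e6 00000000 00000000 00000000 00000000 00000000 00000000 " +
            "00000000 00000000 00000000 00000000 00000000 00000000 00000000 " +
            "00000000 00000000 00000005"
        fields := strings.Fields(row)
        vals := make([]uint64, len(fields))
        for i, f := range fields {
            v, err := strconv.ParseUint(f, 16, 64)
            if err != nil {
                panic(err)
            }
            vals[i] = v
        }
        fmt.Println("entries:", vals[0]) // 230 (0xe6)
        if len(vals) == 17 {
            fmt.Println("search_restart:", vals[16]) // 5
        }
    }
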
diff --git a/vendor/github.com/prometheus/procfs/net_ip_socket.go b/vendor/github.com/prometheus/procfs/net_ip_socket.go
index 7fd57d7f463..b70f1fc7a4a 100644
--- a/vendor/github.com/prometheus/procfs/net_ip_socket.go
+++ b/vendor/github.com/prometheus/procfs/net_ip_socket.go
@@ -50,10 +50,13 @@ type (
// UsedSockets shows the total number of parsed lines representing the
// number of used sockets.
UsedSockets uint64
+ // Drops shows the total number of dropped packets of all UDP sockets.
+ Drops *uint64
}
// netIPSocketLine represents the fields parsed from a single line
// in /proc/net/{t,u}dp{,6}. Fields which are not used by IPSocket are skipped.
+ // Drops is non-nil for udp{,6}, but nil for tcp{,6}.
// For the proc file format details, see https://linux.die.net/man/5/proc.
netIPSocketLine struct {
Sl uint64
@@ -66,6 +69,7 @@ type (
RxQueue uint64
UID uint64
Inode uint64
+ Drops *uint64
}
)
@@ -77,13 +81,14 @@ func newNetIPSocket(file string) (NetIPSocket, error) {
defer f.Close()
var netIPSocket NetIPSocket
+ isUDP := strings.Contains(file, "udp")
lr := io.LimitReader(f, readLimit)
s := bufio.NewScanner(lr)
s.Scan() // skip first line with headers
for s.Scan() {
fields := strings.Fields(s.Text())
- line, err := parseNetIPSocketLine(fields)
+ line, err := parseNetIPSocketLine(fields, isUDP)
if err != nil {
return nil, err
}
@@ -104,19 +109,25 @@ func newNetIPSocketSummary(file string) (*NetIPSocketSummary, error) {
defer f.Close()
var netIPSocketSummary NetIPSocketSummary
+ var udpPacketDrops uint64
+ isUDP := strings.Contains(file, "udp")
lr := io.LimitReader(f, readLimit)
s := bufio.NewScanner(lr)
s.Scan() // skip first line with headers
for s.Scan() {
fields := strings.Fields(s.Text())
- line, err := parseNetIPSocketLine(fields)
+ line, err := parseNetIPSocketLine(fields, isUDP)
if err != nil {
return nil, err
}
netIPSocketSummary.TxQueueLength += line.TxQueue
netIPSocketSummary.RxQueueLength += line.RxQueue
netIPSocketSummary.UsedSockets++
+ if isUDP {
+ udpPacketDrops += *line.Drops
+ netIPSocketSummary.Drops = &udpPacketDrops
+ }
}
if err := s.Err(); err != nil {
return nil, err
@@ -130,7 +141,7 @@ func parseIP(hexIP string) (net.IP, error) {
var byteIP []byte
byteIP, err := hex.DecodeString(hexIP)
if err != nil {
- return nil, fmt.Errorf("cannot parse address field in socket line %q", hexIP)
+ return nil, fmt.Errorf("%w: Cannot parse socket field in %q: %w", ErrFileParse, hexIP, err)
}
switch len(byteIP) {
case 4:
@@ -144,16 +155,17 @@ func parseIP(hexIP string) (net.IP, error) {
}
return i, nil
default:
- return nil, fmt.Errorf("Unable to parse IP %s", hexIP)
+ return nil, fmt.Errorf("%w: Unable to parse IP %s: %v", ErrFileParse, hexIP, nil)
}
}
// parseNetIPSocketLine parses a single line, represented by a list of fields.
-func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) {
+func parseNetIPSocketLine(fields []string, isUDP bool) (*netIPSocketLine, error) {
line := &netIPSocketLine{}
if len(fields) < 10 {
return nil, fmt.Errorf(
- "cannot parse net socket line as it has less then 10 columns %q",
+ "%w: Less than 10 columns found %q",
+ ErrFileParse,
strings.Join(fields, " "),
)
}
@@ -162,64 +174,74 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) {
// sl
s := strings.Split(fields[0], ":")
if len(s) != 2 {
- return nil, fmt.Errorf("cannot parse sl field in socket line %q", fields[0])
+ return nil, fmt.Errorf("%w: Unable to parse sl field in line %q", ErrFileParse, fields[0])
}
if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil {
- return nil, fmt.Errorf("cannot parse sl value in socket line: %w", err)
+ return nil, fmt.Errorf("%w: Unable to parse sl field in %q: %w", ErrFileParse, line.Sl, err)
}
// local_address
l := strings.Split(fields[1], ":")
if len(l) != 2 {
- return nil, fmt.Errorf("cannot parse local_address field in socket line %q", fields[1])
+ return nil, fmt.Errorf("%w: Unable to parse local_address field in %q", ErrFileParse, fields[1])
}
if line.LocalAddr, err = parseIP(l[0]); err != nil {
return nil, err
}
if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil {
- return nil, fmt.Errorf("cannot parse local_address port value in socket line: %w", err)
+ return nil, fmt.Errorf("%w: Unable to parse local_address port value line %q: %w", ErrFileParse, line.LocalPort, err)
}
// remote_address
r := strings.Split(fields[2], ":")
if len(r) != 2 {
- return nil, fmt.Errorf("cannot parse rem_address field in socket line %q", fields[1])
+ return nil, fmt.Errorf("%w: Unable to parse rem_address field in %q", ErrFileParse, fields[1])
}
if line.RemAddr, err = parseIP(r[0]); err != nil {
return nil, err
}
if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil {
- return nil, fmt.Errorf("cannot parse rem_address port value in socket line: %w", err)
+ return nil, fmt.Errorf("%w: Cannot parse rem_address port value in %q: %w", ErrFileParse, line.RemPort, err)
}
// st
if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil {
- return nil, fmt.Errorf("cannot parse st value in socket line: %w", err)
+ return nil, fmt.Errorf("%w: Cannot parse st value in %q: %w", ErrFileParse, line.St, err)
}
// tx_queue and rx_queue
q := strings.Split(fields[4], ":")
if len(q) != 2 {
return nil, fmt.Errorf(
- "cannot parse tx/rx queues in socket line as it has a missing colon %q",
+ "%w: Missing colon for tx/rx queues in socket line %q",
+ ErrFileParse,
fields[4],
)
}
if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil {
- return nil, fmt.Errorf("cannot parse tx_queue value in socket line: %w", err)
+ return nil, fmt.Errorf("%w: Cannot parse tx_queue value in %q: %w", ErrFileParse, line.TxQueue, err)
}
if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil {
- return nil, fmt.Errorf("cannot parse rx_queue value in socket line: %w", err)
+ return nil, fmt.Errorf("%w: Cannot parse trx_queue value in %q: %w", ErrFileParse, line.RxQueue, err)
}
// uid
if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil {
- return nil, fmt.Errorf("cannot parse uid value in socket line: %w", err)
+ return nil, fmt.Errorf("%w: Cannot parse UID value in %q: %w", ErrFileParse, line.UID, err)
}
// inode
if line.Inode, err = strconv.ParseUint(fields[9], 0, 64); err != nil {
- return nil, fmt.Errorf("cannot parse inode value in socket line: %w", err)
+ return nil, fmt.Errorf("%w: Cannot parse inode value in %q: %w", ErrFileParse, line.Inode, err)
+ }
+
+ // drops
+ if isUDP {
+ drops, err := strconv.ParseUint(fields[12], 0, 64)
+ if err != nil {
+ return nil, fmt.Errorf("%w: Cannot parse drops value in %q: %w", ErrFileParse, drops, err)
+ }
+ line.Drops = &drops
}
return line, nil
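
For udp and udp6 the kernel appends a drops column as the 13th whitespace-separated field, which is what the new isUDP branch reads; tcp lines have no such column, hence the nil pointer. A sketch over an invented /proc/net/udp data line:

    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    func main() {
        line := "  521: 00000000:14E9 00000000:0000 07 00000000:00000000 00:00000000 " +
            "00000000   997        0 22133 2 0000000000000000 4"
        fields := strings.Fields(line)
        // Field 12 (0-based) is the per-socket drop counter on UDP lines.
        drops, err := strconv.ParseUint(fields[12], 0, 64)
        if err != nil {
            panic(err)
        }
        fmt.Println("drops:", drops) // 4
    }
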
diff --git a/vendor/github.com/prometheus/procfs/net_protocols.go b/vendor/github.com/prometheus/procfs/net_protocols.go
index 374b6f73f82..b6c77b709fa 100644
--- a/vendor/github.com/prometheus/procfs/net_protocols.go
+++ b/vendor/github.com/prometheus/procfs/net_protocols.go
@@ -131,7 +131,7 @@ func (ps NetProtocolStats) parseLine(rawLine string) (*NetProtocolStatLine, erro
} else if fields[6] == disabled {
line.Slab = false
} else {
- return nil, fmt.Errorf("unable to parse capability for protocol: %s", line.Name)
+ return nil, fmt.Errorf("%w: capability for protocol: %s", ErrFileParse, line.Name)
}
line.ModuleName = fields[7]
@@ -173,7 +173,7 @@ func (pc *NetProtocolCapabilities) parseCapabilities(capabilities []string) erro
} else if capabilities[i] == "n" {
*capabilityFields[i] = false
} else {
- return fmt.Errorf("unable to parse capability block for protocol: position %d", i)
+ return fmt.Errorf("%w: capability block for protocol: position %d", ErrFileParse, i)
}
}
return nil
diff --git a/vendor/github.com/prometheus/procfs/net_route.go b/vendor/github.com/prometheus/procfs/net_route.go
new file mode 100644
index 00000000000..deb7029fe1e
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_route.go
@@ -0,0 +1,143 @@
+// Copyright 2023 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+const (
+ blackholeRepresentation string = "*"
+ blackholeIfaceName string = "blackhole"
+ routeLineColumns int = 11
+)
+
+// A NetRouteLine represents one line from net/route.
+type NetRouteLine struct {
+ Iface string
+ Destination uint32
+ Gateway uint32
+ Flags uint32
+ RefCnt uint32
+ Use uint32
+ Metric uint32
+ Mask uint32
+ MTU uint32
+ Window uint32
+ IRTT uint32
+}
+
+func (fs FS) NetRoute() ([]NetRouteLine, error) {
+ return readNetRoute(fs.proc.Path("net", "route"))
+}
+
+func readNetRoute(path string) ([]NetRouteLine, error) {
+ b, err := util.ReadFileNoStat(path)
+ if err != nil {
+ return nil, err
+ }
+
+ routelines, err := parseNetRoute(bytes.NewReader(b))
+ if err != nil {
+ return nil, fmt.Errorf("failed to read net route from %s: %w", path, err)
+ }
+ return routelines, nil
+}
+
+func parseNetRoute(r io.Reader) ([]NetRouteLine, error) {
+ var routelines []NetRouteLine
+
+ scanner := bufio.NewScanner(r)
+ scanner.Scan()
+ for scanner.Scan() {
+ fields := strings.Fields(scanner.Text())
+ routeline, err := parseNetRouteLine(fields)
+ if err != nil {
+ return nil, err
+ }
+ routelines = append(routelines, *routeline)
+ }
+ return routelines, nil
+}
+
+func parseNetRouteLine(fields []string) (*NetRouteLine, error) {
+ if len(fields) != routeLineColumns {
+ return nil, fmt.Errorf("invalid routeline, num of digits: %d", len(fields))
+ }
+ iface := fields[0]
+ if iface == blackholeRepresentation {
+ iface = blackholeIfaceName
+ }
+ destination, err := strconv.ParseUint(fields[1], 16, 32)
+ if err != nil {
+ return nil, err
+ }
+ gateway, err := strconv.ParseUint(fields[2], 16, 32)
+ if err != nil {
+ return nil, err
+ }
+ flags, err := strconv.ParseUint(fields[3], 10, 32)
+ if err != nil {
+ return nil, err
+ }
+ refcnt, err := strconv.ParseUint(fields[4], 10, 32)
+ if err != nil {
+ return nil, err
+ }
+ use, err := strconv.ParseUint(fields[5], 10, 32)
+ if err != nil {
+ return nil, err
+ }
+ metric, err := strconv.ParseUint(fields[6], 10, 32)
+ if err != nil {
+ return nil, err
+ }
+ mask, err := strconv.ParseUint(fields[7], 16, 32)
+ if err != nil {
+ return nil, err
+ }
+ mtu, err := strconv.ParseUint(fields[8], 10, 32)
+ if err != nil {
+ return nil, err
+ }
+ window, err := strconv.ParseUint(fields[9], 10, 32)
+ if err != nil {
+ return nil, err
+ }
+ irtt, err := strconv.ParseUint(fields[10], 10, 32)
+ if err != nil {
+ return nil, err
+ }
+ routeline := &NetRouteLine{
+ Iface: iface,
+ Destination: uint32(destination),
+ Gateway: uint32(gateway),
+ Flags: uint32(flags),
+ RefCnt: uint32(refcnt),
+ Use: uint32(use),
+ Metric: uint32(metric),
+ Mask: uint32(mask),
+ MTU: uint32(mtu),
+ Window: uint32(window),
+ IRTT: uint32(irtt),
+ }
+ return routeline, nil
+}
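
A hedged usage sketch of the new route API; it assumes a mounted /proc, and notes that the address columns are little-endian hex in the source file:

    package main

    import (
        "fmt"

        "github.com/prometheus/procfs"
    )

    func main() {
        fs, err := procfs.NewFS("/proc")
        if err != nil {
            panic(err)
        }
        routes, err := fs.NetRoute()
        if err != nil {
            panic(err)
        }
        for _, r := range routes {
            // Destination, Gateway and Mask hold the raw little-endian words.
            fmt.Printf("%s dst=%08x gw=%08x metric=%d\n",
                r.Iface, r.Destination, r.Gateway, r.Metric)
        }
    }
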
diff --git a/vendor/github.com/prometheus/procfs/net_sockstat.go b/vendor/github.com/prometheus/procfs/net_sockstat.go
index e36f4872dd6..fae62b13d96 100644
--- a/vendor/github.com/prometheus/procfs/net_sockstat.go
+++ b/vendor/github.com/prometheus/procfs/net_sockstat.go
@@ -16,7 +16,6 @@ package procfs
import (
"bufio"
"bytes"
- "errors"
"fmt"
"io"
"strings"
@@ -70,7 +69,7 @@ func readSockstat(name string) (*NetSockstat, error) {
stat, err := parseSockstat(bytes.NewReader(b))
if err != nil {
- return nil, fmt.Errorf("failed to read sockstats from %q: %w", name, err)
+ return nil, fmt.Errorf("%w: sockstats from %q: %w", ErrFileRead, name, err)
}
return stat, nil
@@ -84,13 +83,13 @@ func parseSockstat(r io.Reader) (*NetSockstat, error) {
// Expect a minimum of a protocol and one key/value pair.
fields := strings.Split(s.Text(), " ")
if len(fields) < 3 {
- return nil, fmt.Errorf("malformed sockstat line: %q", s.Text())
+ return nil, fmt.Errorf("%w: Malformed sockstat line: %q", ErrFileParse, s.Text())
}
// The remaining fields are key/value pairs.
kvs, err := parseSockstatKVs(fields[1:])
if err != nil {
- return nil, fmt.Errorf("error parsing sockstat key/value pairs from %q: %w", s.Text(), err)
+ return nil, fmt.Errorf("%w: sockstat key/value pairs from %q: %w", ErrFileParse, s.Text(), err)
}
// The first field is the protocol. We must trim its colon suffix.
@@ -119,7 +118,7 @@ func parseSockstat(r io.Reader) (*NetSockstat, error) {
// parseSockstatKVs parses a string slice into a map of key/value pairs.
func parseSockstatKVs(kvs []string) (map[string]int, error) {
if len(kvs)%2 != 0 {
- return nil, errors.New("odd number of fields in key/value pairs")
+ return nil, fmt.Errorf("%w:: Odd number of fields in key/value pairs %q", ErrFileParse, kvs)
}
// Iterate two values at a time to gather key/value pairs.
diff --git a/vendor/github.com/prometheus/procfs/net_softnet.go b/vendor/github.com/prometheus/procfs/net_softnet.go
index a94f86dc4ae..71c8059f4d7 100644
--- a/vendor/github.com/prometheus/procfs/net_softnet.go
+++ b/vendor/github.com/prometheus/procfs/net_softnet.go
@@ -27,8 +27,9 @@ import (
// For the proc file format details,
// See:
// * Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2343
-// * Linux 4.17 https://elixir.bootlin.com/linux/v4.17/source/net/core/net-procfs.c#L162
-// and https://elixir.bootlin.com/linux/v4.17/source/include/linux/netdevice.h#L2810.
+// * Linux 2.6.39 https://elixir.bootlin.com/linux/v2.6.39/source/net/core/dev.c#L4086
+// * Linux 4.18 https://elixir.bootlin.com/linux/v4.18/source/net/core/net-procfs.c#L162
+// * Linux 5.14 https://elixir.bootlin.com/linux/v5.14/source/net/core/net-procfs.c#L169
// SoftnetStat contains a single row of data from /proc/net/softnet_stat.
type SoftnetStat struct {
@@ -38,6 +39,18 @@ type SoftnetStat struct {
Dropped uint32
// Number of times processing packets ran out of quota.
TimeSqueezed uint32
+ // Number of collisions that occurred while obtaining the device lock during transmit.
+ CPUCollision uint32
+ // Number of times the CPU has been woken up to process packets via RPS (received_rps).
+ ReceivedRps uint32
+ // Number of times the flow limit has been reached.
+ FlowLimitCount uint32
+ // Softnet backlog status.
+ SoftnetBacklogLen uint32
+ // CPU id owning this softnet_data.
+ Index uint32
+ // Width is the number of columns parsed from this softnet_data row.
+ Width int
}
var softNetProcFile = "net/softnet_stat"
@@ -51,7 +64,7 @@ func (fs FS) NetSoftnetStat() ([]SoftnetStat, error) {
entries, err := parseSoftnet(bytes.NewReader(b))
if err != nil {
- return nil, fmt.Errorf("failed to parse /proc/net/softnet_stat: %w", err)
+ return nil, fmt.Errorf("%w: /proc/net/softnet_stat: %w", ErrFileParse, err)
}
return entries, nil
@@ -63,25 +76,65 @@ func parseSoftnet(r io.Reader) ([]SoftnetStat, error) {
s := bufio.NewScanner(r)
var stats []SoftnetStat
+ cpuIndex := 0
for s.Scan() {
columns := strings.Fields(s.Text())
width := len(columns)
+ softnetStat := SoftnetStat{}
if width < minColumns {
- return nil, fmt.Errorf("%d columns were detected, but at least %d were expected", width, minColumns)
+ return nil, fmt.Errorf("%w: detected %d columns, but expected at least %d", ErrFileParse, width, minColumns)
}
- // We only parse the first three columns at the moment.
- us, err := parseHexUint32s(columns[0:3])
- if err != nil {
- return nil, err
+ // Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2347
+ if width >= minColumns {
+ us, err := parseHexUint32s(columns[0:9])
+ if err != nil {
+ return nil, err
+ }
+
+ softnetStat.Processed = us[0]
+ softnetStat.Dropped = us[1]
+ softnetStat.TimeSqueezed = us[2]
+ softnetStat.CPUCollision = us[8]
+ }
+
+ // Linux 2.6.39 https://elixir.bootlin.com/linux/v2.6.39/source/net/core/dev.c#L4086
+ if width >= 10 {
+ us, err := parseHexUint32s(columns[9:10])
+ if err != nil {
+ return nil, err
+ }
+
+ softnetStat.ReceivedRps = us[0]
}
- stats = append(stats, SoftnetStat{
- Processed: us[0],
- Dropped: us[1],
- TimeSqueezed: us[2],
- })
+ // Linux 4.18 https://elixir.bootlin.com/linux/v4.18/source/net/core/net-procfs.c#L162
+ if width >= 11 {
+ us, err := parseHexUint32s(columns[10:11])
+ if err != nil {
+ return nil, err
+ }
+
+ softnetStat.FlowLimitCount = us[0]
+ }
+
+ // Linux 5.14 https://elixir.bootlin.com/linux/v5.14/source/net/core/net-procfs.c#L169
+ if width >= 13 {
+ us, err := parseHexUint32s(columns[11:13])
+ if err != nil {
+ return nil, err
+ }
+
+ softnetStat.SoftnetBacklogLen = us[0]
+ softnetStat.Index = us[1]
+ } else {
+ // For older kernels, create the Index based on the scan line number.
+ softnetStat.Index = uint32(cpuIndex)
+ }
+ softnetStat.Width = width
+ stats = append(stats, softnetStat)
+ cpuIndex++
}
return stats, nil
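
Column count encodes kernel vintage here: 9 columns on 2.6.23, 10 with received_rps, 11 with flow_limit_count, and 13 once backlog length and CPU index were added in 5.14. A sketch parsing one invented 13-column row:

    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    func main() {
        // 13 hex columns, as emitted by Linux >= 5.14.
        row := "0000272d 00000000 00000001 00000000 00000000 00000000 00000000 " +
            "00000000 00000000 00000000 00000000 00000000 00000002"
        cols := strings.Fields(row)
        parse := func(s string) uint32 {
            v, _ := strconv.ParseUint(s, 16, 32)
            return uint32(v)
        }
        fmt.Println("processed:", parse(cols[0]))    // 10029
        fmt.Println("time_squeeze:", parse(cols[2])) // 1
        if len(cols) >= 13 {
            fmt.Println("backlog_len:", parse(cols[11])) // 0
            fmt.Println("cpu:", parse(cols[12]))         // 2
        }
    }
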
diff --git a/vendor/github.com/prometheus/procfs/net_tls_stat.go b/vendor/github.com/prometheus/procfs/net_tls_stat.go
new file mode 100644
index 00000000000..13994c1782f
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_tls_stat.go
@@ -0,0 +1,119 @@
+// Copyright 2023 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// TLSStat struct represents data in /proc/net/tls_stat.
+// See https://docs.kernel.org/networking/tls.html#statistics
+type TLSStat struct {
+ // number of TX sessions currently installed where host handles cryptography
+ TLSCurrTxSw int
+ // number of RX sessions currently installed where host handles cryptography
+ TLSCurrRxSw int
+ // number of TX sessions currently installed where NIC handles cryptography
+ TLSCurrTxDevice int
+ // number of RX sessions currently installed where NIC handles cryptography
+ TLSCurrRxDevice int
+ // number of TX sessions opened with host cryptography
+ TLSTxSw int
+ // number of RX sessions opened with host cryptography
+ TLSRxSw int
+ // number of TX sessions opened with NIC cryptography
+ TLSTxDevice int
+ // number of RX sessions opened with NIC cryptography
+ TLSRxDevice int
+ // record decryption failed (e.g. due to incorrect authentication tag)
+ TLSDecryptError int
+ // number of RX resyncs sent to NICs handling cryptography
+ TLSRxDeviceResync int
+ // number of RX records which had to be re-decrypted due to TLS_RX_EXPECT_NO_PAD mis-prediction. Note that this counter will also increment for non-data records.
+ TLSDecryptRetry int
+ // number of data RX records which had to be re-decrypted due to TLS_RX_EXPECT_NO_PAD mis-prediction.
+ TLSRxNoPadViolation int
+}
+
+// NewTLSStat reads the tls_stat statistics.
+func NewTLSStat() (TLSStat, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return TLSStat{}, err
+ }
+
+ return fs.NewTLSStat()
+}
+
+// NewTLSStat reads the tls_stat statistics.
+func (fs FS) NewTLSStat() (TLSStat, error) {
+ file, err := os.Open(fs.proc.Path("net/tls_stat"))
+ if err != nil {
+ return TLSStat{}, err
+ }
+ defer file.Close()
+
+ var (
+ tlsstat = TLSStat{}
+ s = bufio.NewScanner(file)
+ )
+
+ for s.Scan() {
+ fields := strings.Fields(s.Text())
+
+ if len(fields) != 2 {
+ return TLSStat{}, fmt.Errorf("%w: %q line %q", ErrFileParse, file.Name(), s.Text())
+ }
+
+ name := fields[0]
+ value, err := strconv.Atoi(fields[1])
+ if err != nil {
+ return TLSStat{}, err
+ }
+
+ switch name {
+ case "TlsCurrTxSw":
+ tlsstat.TLSCurrTxSw = value
+ case "TlsCurrRxSw":
+ tlsstat.TLSCurrRxSw = value
+ case "TlsCurrTxDevice":
+ tlsstat.TLSCurrTxDevice = value
+ case "TlsCurrRxDevice":
+ tlsstat.TLSCurrRxDevice = value
+ case "TlsTxSw":
+ tlsstat.TLSTxSw = value
+ case "TlsRxSw":
+ tlsstat.TLSRxSw = value
+ case "TlsTxDevice":
+ tlsstat.TLSTxDevice = value
+ case "TlsRxDevice":
+ tlsstat.TLSRxDevice = value
+ case "TlsDecryptError":
+ tlsstat.TLSDecryptError = value
+ case "TlsRxDeviceResync":
+ tlsstat.TLSRxDeviceResync = value
+ case "TlsDecryptRetry":
+ tlsstat.TLSDecryptRetry = value
+ case "TlsRxNoPadViolation":
+ tlsstat.TLSRxNoPadViolation = value
+ }
+
+ }
+
+ return tlsstat, s.Err()
+}
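
A hedged usage sketch of the new TLS statistics API; /proc/net/tls_stat only exists once the tls kernel module is loaded, so the error path matters:

    package main

    import (
        "fmt"

        "github.com/prometheus/procfs"
    )

    func main() {
        stat, err := procfs.NewTLSStat()
        if err != nil {
            fmt.Println("tls_stat unavailable:", err)
            return
        }
        fmt.Println("current sw tx sessions:", stat.TLSCurrTxSw)
        fmt.Println("decrypt errors:", stat.TLSDecryptError)
    }
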
diff --git a/vendor/github.com/prometheus/procfs/net_unix.go b/vendor/github.com/prometheus/procfs/net_unix.go
index 98aa8e1c31c..d868cebdaae 100644
--- a/vendor/github.com/prometheus/procfs/net_unix.go
+++ b/vendor/github.com/prometheus/procfs/net_unix.go
@@ -108,14 +108,14 @@ func parseNetUNIX(r io.Reader) (*NetUNIX, error) {
line := s.Text()
item, err := nu.parseLine(line, hasInode, minFields)
if err != nil {
- return nil, fmt.Errorf("failed to parse /proc/net/unix data %q: %w", line, err)
+ return nil, fmt.Errorf("%w: /proc/net/unix encountered data %q: %w", ErrFileParse, line, err)
}
nu.Rows = append(nu.Rows, item)
}
if err := s.Err(); err != nil {
- return nil, fmt.Errorf("failed to scan /proc/net/unix data: %w", err)
+ return nil, fmt.Errorf("%w: /proc/net/unix encountered data: %w", ErrFileParse, err)
}
return &nu, nil
@@ -126,7 +126,7 @@ func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine,
l := len(fields)
if l < min {
- return nil, fmt.Errorf("expected at least %d fields but got %d", min, l)
+ return nil, fmt.Errorf("%w: expected at least %d fields but got %d", ErrFileParse, min, l)
}
// Field offsets are as follows:
@@ -136,29 +136,29 @@ func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine,
users, err := u.parseUsers(fields[1])
if err != nil {
- return nil, fmt.Errorf("failed to parse ref count %q: %w", fields[1], err)
+ return nil, fmt.Errorf("%w: ref count %q: %w", ErrFileParse, fields[1], err)
}
flags, err := u.parseFlags(fields[3])
if err != nil {
- return nil, fmt.Errorf("failed to parse flags %q: %w", fields[3], err)
+ return nil, fmt.Errorf("%w: Unable to parse flags %q: %w", ErrFileParse, fields[3], err)
}
typ, err := u.parseType(fields[4])
if err != nil {
- return nil, fmt.Errorf("failed to parse type %q: %w", fields[4], err)
+ return nil, fmt.Errorf("%w: Failed to parse type %q: %w", ErrFileParse, fields[4], err)
}
state, err := u.parseState(fields[5])
if err != nil {
- return nil, fmt.Errorf("failed to parse state %q: %w", fields[5], err)
+ return nil, fmt.Errorf("%w: Failed to parse state %q: %w", ErrFileParse, fields[5], err)
}
var inode uint64
if hasInode {
inode, err = u.parseInode(fields[6])
if err != nil {
- return nil, fmt.Errorf("failed to parse inode %q: %w", fields[6], err)
+ return nil, fmt.Errorf("%w failed to parse inode %q: %w", ErrFileParse, fields[6], err)
}
}
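The rewritten messages above wrap two errors at once: the ErrFileParse sentinel (defined in proc.go later in this patch) and the underlying cause. fmt.Errorf has accepted multiple %w verbs since Go 1.20, and errors.Is matches either wrapped error. A minimal sketch, using strconv.ErrSyntax as a stand-in for the underlying parse failure:

```go
package main

import (
	"errors"
	"fmt"
	"strconv"

	"github.com/prometheus/procfs"
)

func main() {
	// Both wrapped errors stay reachable through errors.Is (Go 1.20+).
	base := strconv.ErrSyntax // stand-in for an underlying parse failure
	err := fmt.Errorf("%w: ref count %q: %w", procfs.ErrFileParse, "abc", base)

	fmt.Println(errors.Is(err, procfs.ErrFileParse)) // true
	fmt.Println(errors.Is(err, strconv.ErrSyntax))   // true
}
```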
diff --git a/vendor/github.com/prometheus/procfs/net_wireless.go b/vendor/github.com/prometheus/procfs/net_wireless.go
new file mode 100644
index 00000000000..7c597bc8708
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_wireless.go
@@ -0,0 +1,182 @@
+// Copyright 2023 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// Wireless models the content of /proc/net/wireless.
+type Wireless struct {
+ Name string
+
+ // Status is the current 4-digit hex value status of the interface.
+ Status uint64
+
+ // QualityLink is the link quality.
+ QualityLink int
+
+ // QualityLevel is the signal gain (dBm).
+ QualityLevel int
+
+ // QualityNoise is the signal noise baseline (dBm).
+ QualityNoise int
+
+ // DiscardedNwid is the number of discarded packets with wrong nwid/essid.
+ DiscardedNwid int
+
+ // DiscardedCrypt is the number of discarded packets with wrong code/decode (WEP).
+ DiscardedCrypt int
+
+ // DiscardedFrag is the number of discarded packets that can't perform MAC reassembly.
+ DiscardedFrag int
+
+ // DiscardedRetry is the number of discarded packets that reached max MAC retries.
+ DiscardedRetry int
+
+ // DiscardedMisc is the number of discarded packets for other reasons.
+ DiscardedMisc int
+
+ // MissedBeacon is the number of missed beacons/superframe.
+ MissedBeacon int
+}
+
+// Wireless returns kernel wireless statistics.
+func (fs FS) Wireless() ([]*Wireless, error) {
+ b, err := util.ReadFileNoStat(fs.proc.Path("net/wireless"))
+ if err != nil {
+ return nil, err
+ }
+
+ m, err := parseWireless(bytes.NewReader(b))
+ if err != nil {
+ return nil, fmt.Errorf("%w: wireless: %w", ErrFileParse, err)
+ }
+
+ return m, nil
+}
+
+// parseWireless parses the contents of /proc/net/wireless.
+/*
+Inter-| sta-| Quality | Discarded packets | Missed | WE
+face | tus | link level noise | nwid crypt frag retry misc | beacon | 22
+ eth1: 0000 5. -256. -10. 0 1 0 3 0 0
+ eth2: 0000 5. -256. -20. 0 2 0 4 0 0
+*/
+func parseWireless(r io.Reader) ([]*Wireless, error) {
+ var (
+ interfaces []*Wireless
+ scanner = bufio.NewScanner(r)
+ )
+
+ for n := 0; scanner.Scan(); n++ {
+ // Skip the 2 header lines.
+ if n < 2 {
+ continue
+ }
+
+ line := scanner.Text()
+
+ parts := strings.Split(line, ":")
+ if len(parts) != 2 {
+ return nil, fmt.Errorf("%w: expected 2 parts after splitting line by ':', got %d for line %q", ErrFileParse, len(parts), line)
+ }
+
+ name := strings.TrimSpace(parts[0])
+ stats := strings.Fields(parts[1])
+
+ if len(stats) < 10 {
+ return nil, fmt.Errorf("%w: invalid number of fields in line %d, expected 10+, got %d: %q", ErrFileParse, n, len(stats), line)
+ }
+
+ status, err := strconv.ParseUint(stats[0], 16, 16)
+ if err != nil {
+ return nil, fmt.Errorf("%w: invalid status in line %d: %q", ErrFileParse, n, line)
+ }
+
+ qlink, err := strconv.Atoi(strings.TrimSuffix(stats[1], "."))
+ if err != nil {
+ return nil, fmt.Errorf("%w: parse Quality:link as integer %q: %w", ErrFileParse, qlink, err)
+ }
+
+ qlevel, err := strconv.Atoi(strings.TrimSuffix(stats[2], "."))
+ if err != nil {
+ return nil, fmt.Errorf("%w: Quality:level as integer %q: %w", ErrFileParse, qlevel, err)
+ }
+
+ qnoise, err := strconv.Atoi(strings.TrimSuffix(stats[3], "."))
+ if err != nil {
+ return nil, fmt.Errorf("%w: Quality:noise as integer %q: %w", ErrFileParse, qnoise, err)
+ }
+
+ dnwid, err := strconv.Atoi(stats[4])
+ if err != nil {
+ return nil, fmt.Errorf("%w: Discarded:nwid as integer %q: %w", ErrFileParse, dnwid, err)
+ }
+
+ dcrypt, err := strconv.Atoi(stats[5])
+ if err != nil {
+ return nil, fmt.Errorf("%w: Discarded:crypt as integer %q: %w", ErrFileParse, dcrypt, err)
+ }
+
+ dfrag, err := strconv.Atoi(stats[6])
+ if err != nil {
+ return nil, fmt.Errorf("%w: Discarded:frag as integer %q: %w", ErrFileParse, dfrag, err)
+ }
+
+ dretry, err := strconv.Atoi(stats[7])
+ if err != nil {
+ return nil, fmt.Errorf("%w: Discarded:retry as integer %q: %w", ErrFileParse, dretry, err)
+ }
+
+ dmisc, err := strconv.Atoi(stats[8])
+ if err != nil {
+ return nil, fmt.Errorf("%w: Discarded:misc as integer %q: %w", ErrFileParse, dmisc, err)
+ }
+
+ mbeacon, err := strconv.Atoi(stats[9])
+ if err != nil {
+ return nil, fmt.Errorf("%w: Missed:beacon as integer %q: %w", ErrFileParse, mbeacon, err)
+ }
+
+ w := &Wireless{
+ Name: name,
+ Status: status,
+ QualityLink: qlink,
+ QualityLevel: qlevel,
+ QualityNoise: qnoise,
+ DiscardedNwid: dnwid,
+ DiscardedCrypt: dcrypt,
+ DiscardedFrag: dfrag,
+ DiscardedRetry: dretry,
+ DiscardedMisc: dmisc,
+ MissedBeacon: mbeacon,
+ }
+
+ interfaces = append(interfaces, w)
+ }
+
+ if err := scanner.Err(); err != nil {
+ return nil, fmt.Errorf("%w: Failed to scan /proc/net/wireless: %w", ErrFileRead, err)
+ }
+
+ return interfaces, nil
+}
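A minimal sketch of the new Wireless accessor. It assumes a host whose kernel populates /proc/net/wireless; the interface names and signal figures printed are whatever the host reports.

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	// Wireless returns one entry per interface in /proc/net/wireless.
	ifaces, err := fs.Wireless()
	if err != nil {
		log.Fatal(err)
	}
	for _, w := range ifaces {
		fmt.Printf("%s: link=%d level=%ddBm noise=%ddBm\n",
			w.Name, w.QualityLink, w.QualityLevel, w.QualityNoise)
	}
}
```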
diff --git a/vendor/github.com/prometheus/procfs/net_xfrm.go b/vendor/github.com/prometheus/procfs/net_xfrm.go
index f9d9d243db3..932ef204684 100644
--- a/vendor/github.com/prometheus/procfs/net_xfrm.go
+++ b/vendor/github.com/prometheus/procfs/net_xfrm.go
@@ -115,7 +115,7 @@ func (fs FS) NewXfrmStat() (XfrmStat, error) {
fields := strings.Fields(s.Text())
if len(fields) != 2 {
- return XfrmStat{}, fmt.Errorf("couldn't parse %q line %q", file.Name(), s.Text())
+ return XfrmStat{}, fmt.Errorf("%w: %q line %q", ErrFileParse, file.Name(), s.Text())
}
name := fields[0]
diff --git a/vendor/github.com/prometheus/procfs/netstat.go b/vendor/github.com/prometheus/procfs/netstat.go
index dcea9c5a671..742dff453ba 100644
--- a/vendor/github.com/prometheus/procfs/netstat.go
+++ b/vendor/github.com/prometheus/procfs/netstat.go
@@ -37,32 +37,46 @@ func (fs FS) NetStat() ([]NetStat, error) {
var netStatsTotal []NetStat
for _, filePath := range statFiles {
- file, err := os.Open(filePath)
+ procNetstat, err := parseNetstat(filePath)
if err != nil {
return nil, err
}
+ procNetstat.Filename = filepath.Base(filePath)
- netStatFile := NetStat{
- Filename: filepath.Base(filePath),
- Stats: make(map[string][]uint64),
- }
- scanner := bufio.NewScanner(file)
- scanner.Scan()
- // First string is always a header for stats
- var headers []string
- headers = append(headers, strings.Fields(scanner.Text())...)
+ netStatsTotal = append(netStatsTotal, procNetstat)
+ }
+ return netStatsTotal, nil
+}
+
+// parseNetstat parses the metrics from `/proc/net/stat/` file
+// and returns a NetStat structure.
+func parseNetstat(filePath string) (NetStat, error) {
+ netStat := NetStat{
+ Stats: make(map[string][]uint64),
+ }
+ file, err := os.Open(filePath)
+ if err != nil {
+ return netStat, err
+ }
+ defer file.Close()
+
+ scanner := bufio.NewScanner(file)
+ scanner.Scan()
- // Other strings represent per-CPU counters
- for scanner.Scan() {
- for num, counter := range strings.Fields(scanner.Text()) {
- value, err := strconv.ParseUint(counter, 16, 64)
- if err != nil {
- return nil, err
- }
- netStatFile.Stats[headers[num]] = append(netStatFile.Stats[headers[num]], value)
+ // First string is always a header for stats
+ var headers []string
+ headers = append(headers, strings.Fields(scanner.Text())...)
+
+ // Other strings represent per-CPU counters
+ for scanner.Scan() {
+ for num, counter := range strings.Fields(scanner.Text()) {
+ value, err := strconv.ParseUint(counter, 16, 64)
+ if err != nil {
+ return NetStat{}, err
}
+ netStat.Stats[headers[num]] = append(netStat.Stats[headers[num]], value)
}
- netStatsTotal = append(netStatsTotal, netStatFile)
}
- return netStatsTotal, nil
+
+ return netStat, nil
}
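The refactor above extracts per-file parsing into parseNetstat without changing the public NetStat API: each returned NetStat still carries the file's base name and a header-keyed map of per-CPU counters. A minimal caller sketch; the "entries" header is illustrative (it appears in nf_conntrack-style files) and depends on which files exist under /proc/net/stat/.

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	netStats, err := fs.NetStat()
	if err != nil {
		log.Fatal(err)
	}
	for _, ns := range netStats {
		var total uint64
		for _, v := range ns.Stats["entries"] { // sum across CPUs
			total += v
		}
		fmt.Printf("%s entries: %d\n", ns.Filename, total)
	}
}
```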
diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go
index c30223af72a..142796368fe 100644
--- a/vendor/github.com/prometheus/procfs/proc.go
+++ b/vendor/github.com/prometheus/procfs/proc.go
@@ -15,13 +15,13 @@ package procfs
import (
"bytes"
+ "errors"
"fmt"
"io"
"os"
"strconv"
"strings"
- "github.com/prometheus/procfs/internal/fs"
"github.com/prometheus/procfs/internal/util"
)
@@ -30,12 +30,18 @@ type Proc struct {
// The process ID.
PID int
- fs fs.FS
+ fs FS
}
// Procs represents a list of Proc structs.
type Procs []Proc
+var (
+ ErrFileParse = errors.New("Error Parsing File")
+ ErrFileRead = errors.New("Error Reading File")
+ ErrMountPoint = errors.New("Error Accessing Mount point")
+)
+
func (p Procs) Len() int { return len(p) }
func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID }
@@ -43,7 +49,7 @@ func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID }
// Self returns a process for the current process read via /proc/self.
func Self() (Proc, error) {
fs, err := NewFS(DefaultMountPoint)
- if err != nil {
+ if err != nil || errors.Unwrap(err) == ErrMountPoint {
return Proc{}, err
}
return fs.Self()
@@ -92,7 +98,7 @@ func (fs FS) Proc(pid int) (Proc, error) {
if _, err := os.Stat(fs.proc.Path(strconv.Itoa(pid))); err != nil {
return Proc{}, err
}
- return Proc{PID: pid, fs: fs.proc}, nil
+ return Proc{PID: pid, fs: fs}, nil
}
// AllProcs returns a list of all currently available processes.
@@ -105,7 +111,7 @@ func (fs FS) AllProcs() (Procs, error) {
names, err := d.Readdirnames(-1)
if err != nil {
- return Procs{}, fmt.Errorf("could not read %q: %w", d.Name(), err)
+ return Procs{}, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, names, err)
}
p := Procs{}
@@ -114,7 +120,7 @@ func (fs FS) AllProcs() (Procs, error) {
if err != nil {
continue
}
- p = append(p, Proc{PID: int(pid), fs: fs.proc})
+ p = append(p, Proc{PID: int(pid), fs: fs})
}
return p, nil
@@ -131,7 +137,7 @@ func (p Proc) CmdLine() ([]string, error) {
return []string{}, nil
}
- return strings.Split(string(bytes.TrimRight(data, string("\x00"))), string(byte(0))), nil
+ return strings.Split(string(bytes.TrimRight(data, "\x00")), "\x00"), nil
}
// Wchan returns the wchan (wait channel) of a process.
@@ -206,7 +212,7 @@ func (p Proc) FileDescriptors() ([]uintptr, error) {
for i, n := range names {
fd, err := strconv.ParseInt(n, 10, 32)
if err != nil {
- return nil, fmt.Errorf("could not parse fd %q: %w", n, err)
+ return nil, fmt.Errorf("%w: Cannot parse line: %v: %w", ErrFileParse, i, err)
}
fds[i] = uintptr(fd)
}
@@ -237,6 +243,19 @@ func (p Proc) FileDescriptorTargets() ([]string, error) {
// FileDescriptorsLen returns the number of currently open file descriptors of
// a process.
func (p Proc) FileDescriptorsLen() (int, error) {
+ // Use fast path if available (Linux v6.2): https://github.com/torvalds/linux/commit/f1f1f2569901
+ if p.fs.isReal {
+ stat, err := os.Stat(p.path("fd"))
+ if err != nil {
+ return 0, err
+ }
+
+ size := stat.Size()
+ if size > 0 {
+ return int(size), nil
+ }
+ }
+
fds, err := p.fileDescriptors()
if err != nil {
return 0, err
@@ -278,14 +297,14 @@ func (p Proc) fileDescriptors() ([]string, error) {
names, err := d.Readdirnames(-1)
if err != nil {
- return nil, fmt.Errorf("could not read %q: %w", d.Name(), err)
+ return nil, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, names, err)
}
return names, nil
}
func (p Proc) path(pa ...string) string {
- return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...)
+ return p.fs.proc.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...)
}
// FileDescriptorsInfo retrieves information about all file descriptors of
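The new fast path leans on a Linux 6.2 kernel change (linked in the comment above) that makes stat(2) on /proc/<pid>/fd report the number of open descriptors as the directory size, avoiding a full directory read; the isReal flag presumably gates this to real procfs mounts rather than test fixtures. Callers are unaffected either way, as in this minimal sketch:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	// On kernels >= 6.2 this stats /proc/self/fd and uses its size;
	// older kernels fall back to counting the directory entries.
	n, err := p.FileDescriptorsLen()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("open file descriptors: %d\n", n)
}
```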
diff --git a/vendor/github.com/prometheus/procfs/proc_cgroup.go b/vendor/github.com/prometheus/procfs/proc_cgroup.go
index cca03327c3f..daeed7f571a 100644
--- a/vendor/github.com/prometheus/procfs/proc_cgroup.go
+++ b/vendor/github.com/prometheus/procfs/proc_cgroup.go
@@ -23,7 +23,7 @@ import (
"github.com/prometheus/procfs/internal/util"
)
-// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the the placement of a PID inside a
+// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the placement of a PID inside a
// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. v1 has one hierarchy per available resource
// controller, while v2 has one unified hierarchy shared by all controllers. Regardless of v1 or v2, all hierarchies
// contain all running processes, so the question answerable with a Cgroup struct is 'where is this process in
@@ -51,7 +51,7 @@ func parseCgroupString(cgroupStr string) (*Cgroup, error) {
fields := strings.SplitN(cgroupStr, ":", 3)
if len(fields) < 3 {
- return nil, fmt.Errorf("at least 3 fields required, found %d fields in cgroup string: %s", len(fields), cgroupStr)
+ return nil, fmt.Errorf("%w: 3+ fields required, found %d fields in cgroup string: %s", ErrFileParse, len(fields), cgroupStr)
}
cgroup := &Cgroup{
@@ -60,7 +60,7 @@ func parseCgroupString(cgroupStr string) (*Cgroup, error) {
}
cgroup.HierarchyID, err = strconv.Atoi(fields[0])
if err != nil {
- return nil, fmt.Errorf("failed to parse hierarchy ID")
+ return nil, fmt.Errorf("%w: hierarchy ID: %q", ErrFileParse, cgroup.HierarchyID)
}
if fields[1] != "" {
ssNames := strings.Split(fields[1], ",")
diff --git a/vendor/github.com/prometheus/procfs/proc_cgroups.go b/vendor/github.com/prometheus/procfs/proc_cgroups.go
index 24d4dce9cfc..5dd4938999a 100644
--- a/vendor/github.com/prometheus/procfs/proc_cgroups.go
+++ b/vendor/github.com/prometheus/procfs/proc_cgroups.go
@@ -46,7 +46,7 @@ func parseCgroupSummaryString(CgroupSummaryStr string) (*CgroupSummary, error) {
fields := strings.Fields(CgroupSummaryStr)
// require at least 4 fields
if len(fields) < 4 {
- return nil, fmt.Errorf("at least 4 fields required, found %d fields in cgroup info string: %s", len(fields), CgroupSummaryStr)
+ return nil, fmt.Errorf("%w: 4+ fields required, found %d fields in cgroup info string: %s", ErrFileParse, len(fields), CgroupSummaryStr)
}
CgroupSummary := &CgroupSummary{
@@ -54,15 +54,15 @@ func parseCgroupSummaryString(CgroupSummaryStr string) (*CgroupSummary, error) {
}
CgroupSummary.Hierarchy, err = strconv.Atoi(fields[1])
if err != nil {
- return nil, fmt.Errorf("failed to parse hierarchy ID")
+ return nil, fmt.Errorf("%w: Unable to parse hierarchy ID from %q", ErrFileParse, fields[1])
}
CgroupSummary.Cgroups, err = strconv.Atoi(fields[2])
if err != nil {
- return nil, fmt.Errorf("failed to parse Cgroup Num")
+ return nil, fmt.Errorf("%w: Unable to parse Cgroup Num from %q", ErrFileParse, fields[2])
}
CgroupSummary.Enabled, err = strconv.Atoi(fields[3])
if err != nil {
- return nil, fmt.Errorf("failed to parse Enabled")
+ return nil, fmt.Errorf("%w: Unable to parse Enabled from %q", ErrFileParse, fields[3])
}
return CgroupSummary, nil
}
diff --git a/vendor/github.com/prometheus/procfs/proc_fdinfo.go b/vendor/github.com/prometheus/procfs/proc_fdinfo.go
index 1bbdd4a8e99..fa761b35295 100644
--- a/vendor/github.com/prometheus/procfs/proc_fdinfo.go
+++ b/vendor/github.com/prometheus/procfs/proc_fdinfo.go
@@ -26,6 +26,7 @@ var (
rPos = regexp.MustCompile(`^pos:\s+(\d+)$`)
rFlags = regexp.MustCompile(`^flags:\s+(\d+)$`)
rMntID = regexp.MustCompile(`^mnt_id:\s+(\d+)$`)
+ rIno = regexp.MustCompile(`^ino:\s+(\d+)$`)
rInotify = regexp.MustCompile(`^inotify`)
rInotifyParts = regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)(?:\s+mask:([0-9a-f]+))?`)
)
@@ -40,6 +41,8 @@ type ProcFDInfo struct {
Flags string
// Mount point ID
MntID string
+ // Inode number
+ Ino string
// List of inotify lines (structured) in the fdinfo file (kernel 3.8+ only)
InotifyInfos []InotifyInfo
}
@@ -51,7 +54,7 @@ func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) {
return nil, err
}
- var text, pos, flags, mntid string
+ var text, pos, flags, mntid, ino string
var inotify []InotifyInfo
scanner := bufio.NewScanner(bytes.NewReader(data))
@@ -63,6 +66,8 @@ func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) {
flags = rFlags.FindStringSubmatch(text)[1]
} else if rMntID.MatchString(text) {
mntid = rMntID.FindStringSubmatch(text)[1]
+ } else if rIno.MatchString(text) {
+ ino = rIno.FindStringSubmatch(text)[1]
} else if rInotify.MatchString(text) {
newInotify, err := parseInotifyInfo(text)
if err != nil {
@@ -77,6 +82,7 @@ func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) {
Pos: pos,
Flags: flags,
MntID: mntid,
+ Ino: ino,
InotifyInfos: inotify,
}
@@ -111,7 +117,7 @@ func parseInotifyInfo(line string) (*InotifyInfo, error) {
}
return i, nil
}
- return nil, fmt.Errorf("invalid inode entry: %q", line)
+ return nil, fmt.Errorf("%w: invalid inode entry: %q", ErrFileParse, line)
}
// ProcFDInfos represents a list of ProcFDInfo structs.
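A minimal sketch exercising the new Ino field; on kernels whose fdinfo files do not emit an "ino:" line, the field is simply left as the empty string.

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	// FDInfo parses /proc/self/fdinfo/0.
	info, err := p.FDInfo("0")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("fd 0: pos=%s flags=%s mnt_id=%s ino=%s\n",
		info.Pos, info.Flags, info.MntID, info.Ino)
}
```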
diff --git a/vendor/github.com/prometheus/procfs/proc_interrupts.go b/vendor/github.com/prometheus/procfs/proc_interrupts.go
new file mode 100644
index 00000000000..86b4b452463
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_interrupts.go
@@ -0,0 +1,98 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// Interrupt represents a single interrupt line.
+type Interrupt struct {
+ // Info is the type of interrupt.
+ Info string
+ // Devices is the name of the device that is located at that IRQ.
+ Devices string
+ // Values is the number of interrupts per CPU.
+ Values []string
+}
+
+// Interrupts models the content of /proc/interrupts. Key is the IRQ number.
+// - https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/deployment_guide/s2-proc-interrupts
+// - https://raspberrypi.stackexchange.com/questions/105802/explanation-of-proc-interrupts-output
+type Interrupts map[string]Interrupt
+
+// Interrupts creates a new instance from a given Proc instance.
+func (p Proc) Interrupts() (Interrupts, error) {
+ data, err := util.ReadFileNoStat(p.path("interrupts"))
+ if err != nil {
+ return nil, err
+ }
+ return parseInterrupts(bytes.NewReader(data))
+}
+
+func parseInterrupts(r io.Reader) (Interrupts, error) {
+ var (
+ interrupts = Interrupts{}
+ scanner = bufio.NewScanner(r)
+ )
+
+ if !scanner.Scan() {
+ return nil, errors.New("interrupts empty")
+ }
+ cpuNum := len(strings.Fields(scanner.Text())) // one header per cpu
+
+ for scanner.Scan() {
+ parts := strings.Fields(scanner.Text())
+ if len(parts) == 0 { // skip empty lines
+ continue
+ }
+ if len(parts) < 2 {
+ return nil, fmt.Errorf("%w: Not enough fields in interrupts (expected 2+ fields but got %d): %s", ErrFileParse, len(parts), parts)
+ }
+ intName := parts[0][:len(parts[0])-1] // remove trailing :
+
+ if len(parts) == 2 {
+ interrupts[intName] = Interrupt{
+ Info: "",
+ Devices: "",
+ Values: []string{
+ parts[1],
+ },
+ }
+ continue
+ }
+
+ intr := Interrupt{
+ Values: parts[1 : cpuNum+1],
+ }
+
+ if _, err := strconv.Atoi(intName); err == nil { // numeral interrupt
+ intr.Info = parts[cpuNum+1]
+ intr.Devices = strings.Join(parts[cpuNum+2:], " ")
+ } else {
+ intr.Info = strings.Join(parts[cpuNum+1:], " ")
+ }
+ interrupts[intName] = intr
+ }
+
+ return interrupts, scanner.Err()
+}
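A minimal sketch of the new per-process Interrupts accessor; IRQ names, Info strings, and device lists depend entirely on the host's hardware.

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	ints, err := p.Interrupts()
	if err != nil {
		log.Fatal(err)
	}
	// The map key is the IRQ number or named interrupt (e.g. "NMI").
	for irq, i := range ints {
		fmt.Printf("%s: info=%q devices=%q per-cpu=%v\n",
			irq, i.Info, i.Devices, i.Values)
	}
}
```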
diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go
index 7a1388185a9..9530b14bc68 100644
--- a/vendor/github.com/prometheus/procfs/proc_limits.go
+++ b/vendor/github.com/prometheus/procfs/proc_limits.go
@@ -103,7 +103,7 @@ func (p Proc) Limits() (ProcLimits, error) {
//fields := limitsMatch.Split(s.Text(), limitsFields)
fields := limitsMatch.FindStringSubmatch(s.Text())
if len(fields) != limitsFields {
- return ProcLimits{}, fmt.Errorf("couldn't parse %q line %q", f.Name(), s.Text())
+ return ProcLimits{}, fmt.Errorf("%w: couldn't parse %q line %q", ErrFileParse, f.Name(), s.Text())
}
switch fields[1] {
@@ -154,7 +154,7 @@ func parseUint(s string) (uint64, error) {
}
i, err := strconv.ParseUint(s, 10, 64)
if err != nil {
- return 0, fmt.Errorf("couldn't parse value %q: %w", s, err)
+ return 0, fmt.Errorf("%w: couldn't parse value %q: %w", ErrFileParse, s, err)
}
return i, nil
}
diff --git a/vendor/github.com/prometheus/procfs/proc_maps.go b/vendor/github.com/prometheus/procfs/proc_maps.go
index f1bcbf32bb3..7e75c286b5b 100644
--- a/vendor/github.com/prometheus/procfs/proc_maps.go
+++ b/vendor/github.com/prometheus/procfs/proc_maps.go
@@ -63,17 +63,17 @@ type ProcMap struct {
// parseDevice parses the device token of a line and converts it to a dev_t
// (mkdev) like structure.
func parseDevice(s string) (uint64, error) {
- toks := strings.Split(s, ":")
- if len(toks) < 2 {
- return 0, fmt.Errorf("unexpected number of fields")
+ i := strings.Index(s, ":")
+ if i == -1 {
+ return 0, fmt.Errorf("%w: expected separator `:` in %s", ErrFileParse, s)
}
- major, err := strconv.ParseUint(toks[0], 16, 0)
+ major, err := strconv.ParseUint(s[0:i], 16, 0)
if err != nil {
return 0, err
}
- minor, err := strconv.ParseUint(toks[1], 16, 0)
+ minor, err := strconv.ParseUint(s[i+1:], 16, 0)
if err != nil {
return 0, err
}
@@ -93,17 +93,17 @@ func parseAddress(s string) (uintptr, error) {
// parseAddresses parses the start-end address.
func parseAddresses(s string) (uintptr, uintptr, error) {
- toks := strings.Split(s, "-")
- if len(toks) < 2 {
- return 0, 0, fmt.Errorf("invalid address")
+ idx := strings.Index(s, "-")
+ if idx == -1 {
+ return 0, 0, fmt.Errorf("%w: expected separator `-` in %s", ErrFileParse, s)
}
- saddr, err := parseAddress(toks[0])
+ saddr, err := parseAddress(s[0:idx])
if err != nil {
return 0, 0, err
}
- eaddr, err := parseAddress(toks[1])
+ eaddr, err := parseAddress(s[idx+1:])
if err != nil {
return 0, 0, err
}
@@ -114,7 +114,7 @@ func parseAddresses(s string) (uintptr, uintptr, error) {
// parsePermissions parses a token and returns any that are set.
func parsePermissions(s string) (*ProcMapPermissions, error) {
if len(s) < 4 {
- return nil, fmt.Errorf("invalid permissions token")
+ return nil, fmt.Errorf("%w: invalid permissions token", ErrFileParse)
}
perms := ProcMapPermissions{}
@@ -141,7 +141,7 @@ func parsePermissions(s string) (*ProcMapPermissions, error) {
func parseProcMap(text string) (*ProcMap, error) {
fields := strings.Fields(text)
if len(fields) < 5 {
- return nil, fmt.Errorf("truncated procmap entry")
+ return nil, fmt.Errorf("%w: truncated procmap entry", ErrFileParse)
}
saddr, eaddr, err := parseAddresses(fields[0])
diff --git a/vendor/github.com/prometheus/procfs/proc_netstat.go b/vendor/github.com/prometheus/procfs/proc_netstat.go
index 48b5238194e..8e3ff4d794b 100644
--- a/vendor/github.com/prometheus/procfs/proc_netstat.go
+++ b/vendor/github.com/prometheus/procfs/proc_netstat.go
@@ -33,139 +33,140 @@ type ProcNetstat struct {
}
type TcpExt struct { // nolint:revive
- SyncookiesSent float64
- SyncookiesRecv float64
- SyncookiesFailed float64
- EmbryonicRsts float64
- PruneCalled float64
- RcvPruned float64
- OfoPruned float64
- OutOfWindowIcmps float64
- LockDroppedIcmps float64
- ArpFilter float64
- TW float64
- TWRecycled float64
- TWKilled float64
- PAWSActive float64
- PAWSEstab float64
- DelayedACKs float64
- DelayedACKLocked float64
- DelayedACKLost float64
- ListenOverflows float64
- ListenDrops float64
- TCPHPHits float64
- TCPPureAcks float64
- TCPHPAcks float64
- TCPRenoRecovery float64
- TCPSackRecovery float64
- TCPSACKReneging float64
- TCPSACKReorder float64
- TCPRenoReorder float64
- TCPTSReorder float64
- TCPFullUndo float64
- TCPPartialUndo float64
- TCPDSACKUndo float64
- TCPLossUndo float64
- TCPLostRetransmit float64
- TCPRenoFailures float64
- TCPSackFailures float64
- TCPLossFailures float64
- TCPFastRetrans float64
- TCPSlowStartRetrans float64
- TCPTimeouts float64
- TCPLossProbes float64
- TCPLossProbeRecovery float64
- TCPRenoRecoveryFail float64
- TCPSackRecoveryFail float64
- TCPRcvCollapsed float64
- TCPDSACKOldSent float64
- TCPDSACKOfoSent float64
- TCPDSACKRecv float64
- TCPDSACKOfoRecv float64
- TCPAbortOnData float64
- TCPAbortOnClose float64
- TCPAbortOnMemory float64
- TCPAbortOnTimeout float64
- TCPAbortOnLinger float64
- TCPAbortFailed float64
- TCPMemoryPressures float64
- TCPMemoryPressuresChrono float64
- TCPSACKDiscard float64
- TCPDSACKIgnoredOld float64
- TCPDSACKIgnoredNoUndo float64
- TCPSpuriousRTOs float64
- TCPMD5NotFound float64
- TCPMD5Unexpected float64
- TCPMD5Failure float64
- TCPSackShifted float64
- TCPSackMerged float64
- TCPSackShiftFallback float64
- TCPBacklogDrop float64
- PFMemallocDrop float64
- TCPMinTTLDrop float64
- TCPDeferAcceptDrop float64
- IPReversePathFilter float64
- TCPTimeWaitOverflow float64
- TCPReqQFullDoCookies float64
- TCPReqQFullDrop float64
- TCPRetransFail float64
- TCPRcvCoalesce float64
- TCPOFOQueue float64
- TCPOFODrop float64
- TCPOFOMerge float64
- TCPChallengeACK float64
- TCPSYNChallenge float64
- TCPFastOpenActive float64
- TCPFastOpenActiveFail float64
- TCPFastOpenPassive float64
- TCPFastOpenPassiveFail float64
- TCPFastOpenListenOverflow float64
- TCPFastOpenCookieReqd float64
- TCPFastOpenBlackhole float64
- TCPSpuriousRtxHostQueues float64
- BusyPollRxPackets float64
- TCPAutoCorking float64
- TCPFromZeroWindowAdv float64
- TCPToZeroWindowAdv float64
- TCPWantZeroWindowAdv float64
- TCPSynRetrans float64
- TCPOrigDataSent float64
- TCPHystartTrainDetect float64
- TCPHystartTrainCwnd float64
- TCPHystartDelayDetect float64
- TCPHystartDelayCwnd float64
- TCPACKSkippedSynRecv float64
- TCPACKSkippedPAWS float64
- TCPACKSkippedSeq float64
- TCPACKSkippedFinWait2 float64
- TCPACKSkippedTimeWait float64
- TCPACKSkippedChallenge float64
- TCPWinProbe float64
- TCPKeepAlive float64
- TCPMTUPFail float64
- TCPMTUPSuccess float64
- TCPWqueueTooBig float64
+ SyncookiesSent *float64
+ SyncookiesRecv *float64
+ SyncookiesFailed *float64
+ EmbryonicRsts *float64
+ PruneCalled *float64
+ RcvPruned *float64
+ OfoPruned *float64
+ OutOfWindowIcmps *float64
+ LockDroppedIcmps *float64
+ ArpFilter *float64
+ TW *float64
+ TWRecycled *float64
+ TWKilled *float64
+ PAWSActive *float64
+ PAWSEstab *float64
+ DelayedACKs *float64
+ DelayedACKLocked *float64
+ DelayedACKLost *float64
+ ListenOverflows *float64
+ ListenDrops *float64
+ TCPHPHits *float64
+ TCPPureAcks *float64
+ TCPHPAcks *float64
+ TCPRenoRecovery *float64
+ TCPSackRecovery *float64
+ TCPSACKReneging *float64
+ TCPSACKReorder *float64
+ TCPRenoReorder *float64
+ TCPTSReorder *float64
+ TCPFullUndo *float64
+ TCPPartialUndo *float64
+ TCPDSACKUndo *float64
+ TCPLossUndo *float64
+ TCPLostRetransmit *float64
+ TCPRenoFailures *float64
+ TCPSackFailures *float64
+ TCPLossFailures *float64
+ TCPFastRetrans *float64
+ TCPSlowStartRetrans *float64
+ TCPTimeouts *float64
+ TCPLossProbes *float64
+ TCPLossProbeRecovery *float64
+ TCPRenoRecoveryFail *float64
+ TCPSackRecoveryFail *float64
+ TCPRcvCollapsed *float64
+ TCPDSACKOldSent *float64
+ TCPDSACKOfoSent *float64
+ TCPDSACKRecv *float64
+ TCPDSACKOfoRecv *float64
+ TCPAbortOnData *float64
+ TCPAbortOnClose *float64
+ TCPAbortOnMemory *float64
+ TCPAbortOnTimeout *float64
+ TCPAbortOnLinger *float64
+ TCPAbortFailed *float64
+ TCPMemoryPressures *float64
+ TCPMemoryPressuresChrono *float64
+ TCPSACKDiscard *float64
+ TCPDSACKIgnoredOld *float64
+ TCPDSACKIgnoredNoUndo *float64
+ TCPSpuriousRTOs *float64
+ TCPMD5NotFound *float64
+ TCPMD5Unexpected *float64
+ TCPMD5Failure *float64
+ TCPSackShifted *float64
+ TCPSackMerged *float64
+ TCPSackShiftFallback *float64
+ TCPBacklogDrop *float64
+ PFMemallocDrop *float64
+ TCPMinTTLDrop *float64
+ TCPDeferAcceptDrop *float64
+ IPReversePathFilter *float64
+ TCPTimeWaitOverflow *float64
+ TCPReqQFullDoCookies *float64
+ TCPReqQFullDrop *float64
+ TCPRetransFail *float64
+ TCPRcvCoalesce *float64
+ TCPRcvQDrop *float64
+ TCPOFOQueue *float64
+ TCPOFODrop *float64
+ TCPOFOMerge *float64
+ TCPChallengeACK *float64
+ TCPSYNChallenge *float64
+ TCPFastOpenActive *float64
+ TCPFastOpenActiveFail *float64
+ TCPFastOpenPassive *float64
+ TCPFastOpenPassiveFail *float64
+ TCPFastOpenListenOverflow *float64
+ TCPFastOpenCookieReqd *float64
+ TCPFastOpenBlackhole *float64
+ TCPSpuriousRtxHostQueues *float64
+ BusyPollRxPackets *float64
+ TCPAutoCorking *float64
+ TCPFromZeroWindowAdv *float64
+ TCPToZeroWindowAdv *float64
+ TCPWantZeroWindowAdv *float64
+ TCPSynRetrans *float64
+ TCPOrigDataSent *float64
+ TCPHystartTrainDetect *float64
+ TCPHystartTrainCwnd *float64
+ TCPHystartDelayDetect *float64
+ TCPHystartDelayCwnd *float64
+ TCPACKSkippedSynRecv *float64
+ TCPACKSkippedPAWS *float64
+ TCPACKSkippedSeq *float64
+ TCPACKSkippedFinWait2 *float64
+ TCPACKSkippedTimeWait *float64
+ TCPACKSkippedChallenge *float64
+ TCPWinProbe *float64
+ TCPKeepAlive *float64
+ TCPMTUPFail *float64
+ TCPMTUPSuccess *float64
+ TCPWqueueTooBig *float64
}
type IpExt struct { // nolint:revive
- InNoRoutes float64
- InTruncatedPkts float64
- InMcastPkts float64
- OutMcastPkts float64
- InBcastPkts float64
- OutBcastPkts float64
- InOctets float64
- OutOctets float64
- InMcastOctets float64
- OutMcastOctets float64
- InBcastOctets float64
- OutBcastOctets float64
- InCsumErrors float64
- InNoECTPkts float64
- InECT1Pkts float64
- InECT0Pkts float64
- InCEPkts float64
- ReasmOverlaps float64
+ InNoRoutes *float64
+ InTruncatedPkts *float64
+ InMcastPkts *float64
+ OutMcastPkts *float64
+ InBcastPkts *float64
+ OutBcastPkts *float64
+ InOctets *float64
+ OutOctets *float64
+ InMcastOctets *float64
+ OutMcastOctets *float64
+ InBcastOctets *float64
+ OutBcastOctets *float64
+ InCsumErrors *float64
+ InNoECTPkts *float64
+ InECT1Pkts *float64
+ InECT0Pkts *float64
+ InCEPkts *float64
+ ReasmOverlaps *float64
}
func (p Proc) Netstat() (ProcNetstat, error) {
@@ -174,14 +175,14 @@ func (p Proc) Netstat() (ProcNetstat, error) {
if err != nil {
return ProcNetstat{PID: p.PID}, err
}
- procNetstat, err := parseNetstat(bytes.NewReader(data), filename)
+ procNetstat, err := parseProcNetstat(bytes.NewReader(data), filename)
procNetstat.PID = p.PID
return procNetstat, err
}
-// parseNetstat parses the metrics from proc//net/netstat file
+// parseProcNetstat parses the metrics from proc//net/netstat file
// and returns a ProcNetstat structure.
-func parseNetstat(r io.Reader, fileName string) (ProcNetstat, error) {
+func parseProcNetstat(r io.Reader, fileName string) (ProcNetstat, error) {
var (
scanner = bufio.NewScanner(r)
procNetstat = ProcNetstat{}
@@ -194,8 +195,8 @@ func parseNetstat(r io.Reader, fileName string) (ProcNetstat, error) {
// Remove trailing :.
protocol := strings.TrimSuffix(nameParts[0], ":")
if len(nameParts) != len(valueParts) {
- return procNetstat, fmt.Errorf("mismatch field count mismatch in %s: %s",
- fileName, protocol)
+ return procNetstat, fmt.Errorf("%w: field count mismatch in %s: %s",
+ ErrFileParse, fileName, protocol)
}
for i := 1; i < len(nameParts); i++ {
value, err := strconv.ParseFloat(valueParts[i], 64)
@@ -208,230 +209,232 @@ func parseNetstat(r io.Reader, fileName string) (ProcNetstat, error) {
case "TcpExt":
switch key {
case "SyncookiesSent":
- procNetstat.TcpExt.SyncookiesSent = value
+ procNetstat.TcpExt.SyncookiesSent = &value
case "SyncookiesRecv":
- procNetstat.TcpExt.SyncookiesRecv = value
+ procNetstat.TcpExt.SyncookiesRecv = &value
case "SyncookiesFailed":
- procNetstat.TcpExt.SyncookiesFailed = value
+ procNetstat.TcpExt.SyncookiesFailed = &value
case "EmbryonicRsts":
- procNetstat.TcpExt.EmbryonicRsts = value
+ procNetstat.TcpExt.EmbryonicRsts = &value
case "PruneCalled":
- procNetstat.TcpExt.PruneCalled = value
+ procNetstat.TcpExt.PruneCalled = &value
case "RcvPruned":
- procNetstat.TcpExt.RcvPruned = value
+ procNetstat.TcpExt.RcvPruned = &value
case "OfoPruned":
- procNetstat.TcpExt.OfoPruned = value
+ procNetstat.TcpExt.OfoPruned = &value
case "OutOfWindowIcmps":
- procNetstat.TcpExt.OutOfWindowIcmps = value
+ procNetstat.TcpExt.OutOfWindowIcmps = &value
case "LockDroppedIcmps":
- procNetstat.TcpExt.LockDroppedIcmps = value
+ procNetstat.TcpExt.LockDroppedIcmps = &value
case "ArpFilter":
- procNetstat.TcpExt.ArpFilter = value
+ procNetstat.TcpExt.ArpFilter = &value
case "TW":
- procNetstat.TcpExt.TW = value
+ procNetstat.TcpExt.TW = &value
case "TWRecycled":
- procNetstat.TcpExt.TWRecycled = value
+ procNetstat.TcpExt.TWRecycled = &value
case "TWKilled":
- procNetstat.TcpExt.TWKilled = value
+ procNetstat.TcpExt.TWKilled = &value
case "PAWSActive":
- procNetstat.TcpExt.PAWSActive = value
+ procNetstat.TcpExt.PAWSActive = &value
case "PAWSEstab":
- procNetstat.TcpExt.PAWSEstab = value
+ procNetstat.TcpExt.PAWSEstab = &value
case "DelayedACKs":
- procNetstat.TcpExt.DelayedACKs = value
+ procNetstat.TcpExt.DelayedACKs = &value
case "DelayedACKLocked":
- procNetstat.TcpExt.DelayedACKLocked = value
+ procNetstat.TcpExt.DelayedACKLocked = &value
case "DelayedACKLost":
- procNetstat.TcpExt.DelayedACKLost = value
+ procNetstat.TcpExt.DelayedACKLost = &value
case "ListenOverflows":
- procNetstat.TcpExt.ListenOverflows = value
+ procNetstat.TcpExt.ListenOverflows = &value
case "ListenDrops":
- procNetstat.TcpExt.ListenDrops = value
+ procNetstat.TcpExt.ListenDrops = &value
case "TCPHPHits":
- procNetstat.TcpExt.TCPHPHits = value
+ procNetstat.TcpExt.TCPHPHits = &value
case "TCPPureAcks":
- procNetstat.TcpExt.TCPPureAcks = value
+ procNetstat.TcpExt.TCPPureAcks = &value
case "TCPHPAcks":
- procNetstat.TcpExt.TCPHPAcks = value
+ procNetstat.TcpExt.TCPHPAcks = &value
case "TCPRenoRecovery":
- procNetstat.TcpExt.TCPRenoRecovery = value
+ procNetstat.TcpExt.TCPRenoRecovery = &value
case "TCPSackRecovery":
- procNetstat.TcpExt.TCPSackRecovery = value
+ procNetstat.TcpExt.TCPSackRecovery = &value
case "TCPSACKReneging":
- procNetstat.TcpExt.TCPSACKReneging = value
+ procNetstat.TcpExt.TCPSACKReneging = &value
case "TCPSACKReorder":
- procNetstat.TcpExt.TCPSACKReorder = value
+ procNetstat.TcpExt.TCPSACKReorder = &value
case "TCPRenoReorder":
- procNetstat.TcpExt.TCPRenoReorder = value
+ procNetstat.TcpExt.TCPRenoReorder = &value
case "TCPTSReorder":
- procNetstat.TcpExt.TCPTSReorder = value
+ procNetstat.TcpExt.TCPTSReorder = &value
case "TCPFullUndo":
- procNetstat.TcpExt.TCPFullUndo = value
+ procNetstat.TcpExt.TCPFullUndo = &value
case "TCPPartialUndo":
- procNetstat.TcpExt.TCPPartialUndo = value
+ procNetstat.TcpExt.TCPPartialUndo = &value
case "TCPDSACKUndo":
- procNetstat.TcpExt.TCPDSACKUndo = value
+ procNetstat.TcpExt.TCPDSACKUndo = &value
case "TCPLossUndo":
- procNetstat.TcpExt.TCPLossUndo = value
+ procNetstat.TcpExt.TCPLossUndo = &value
case "TCPLostRetransmit":
- procNetstat.TcpExt.TCPLostRetransmit = value
+ procNetstat.TcpExt.TCPLostRetransmit = &value
case "TCPRenoFailures":
- procNetstat.TcpExt.TCPRenoFailures = value
+ procNetstat.TcpExt.TCPRenoFailures = &value
case "TCPSackFailures":
- procNetstat.TcpExt.TCPSackFailures = value
+ procNetstat.TcpExt.TCPSackFailures = &value
case "TCPLossFailures":
- procNetstat.TcpExt.TCPLossFailures = value
+ procNetstat.TcpExt.TCPLossFailures = &value
case "TCPFastRetrans":
- procNetstat.TcpExt.TCPFastRetrans = value
+ procNetstat.TcpExt.TCPFastRetrans = &value
case "TCPSlowStartRetrans":
- procNetstat.TcpExt.TCPSlowStartRetrans = value
+ procNetstat.TcpExt.TCPSlowStartRetrans = &value
case "TCPTimeouts":
- procNetstat.TcpExt.TCPTimeouts = value
+ procNetstat.TcpExt.TCPTimeouts = &value
case "TCPLossProbes":
- procNetstat.TcpExt.TCPLossProbes = value
+ procNetstat.TcpExt.TCPLossProbes = &value
case "TCPLossProbeRecovery":
- procNetstat.TcpExt.TCPLossProbeRecovery = value
+ procNetstat.TcpExt.TCPLossProbeRecovery = &value
case "TCPRenoRecoveryFail":
- procNetstat.TcpExt.TCPRenoRecoveryFail = value
+ procNetstat.TcpExt.TCPRenoRecoveryFail = &value
case "TCPSackRecoveryFail":
- procNetstat.TcpExt.TCPSackRecoveryFail = value
+ procNetstat.TcpExt.TCPSackRecoveryFail = &value
case "TCPRcvCollapsed":
- procNetstat.TcpExt.TCPRcvCollapsed = value
+ procNetstat.TcpExt.TCPRcvCollapsed = &value
case "TCPDSACKOldSent":
- procNetstat.TcpExt.TCPDSACKOldSent = value
+ procNetstat.TcpExt.TCPDSACKOldSent = &value
case "TCPDSACKOfoSent":
- procNetstat.TcpExt.TCPDSACKOfoSent = value
+ procNetstat.TcpExt.TCPDSACKOfoSent = &value
case "TCPDSACKRecv":
- procNetstat.TcpExt.TCPDSACKRecv = value
+ procNetstat.TcpExt.TCPDSACKRecv = &value
case "TCPDSACKOfoRecv":
- procNetstat.TcpExt.TCPDSACKOfoRecv = value
+ procNetstat.TcpExt.TCPDSACKOfoRecv = &value
case "TCPAbortOnData":
- procNetstat.TcpExt.TCPAbortOnData = value
+ procNetstat.TcpExt.TCPAbortOnData = &value
case "TCPAbortOnClose":
- procNetstat.TcpExt.TCPAbortOnClose = value
+ procNetstat.TcpExt.TCPAbortOnClose = &value
case "TCPDeferAcceptDrop":
- procNetstat.TcpExt.TCPDeferAcceptDrop = value
+ procNetstat.TcpExt.TCPDeferAcceptDrop = &value
case "IPReversePathFilter":
- procNetstat.TcpExt.IPReversePathFilter = value
+ procNetstat.TcpExt.IPReversePathFilter = &value
case "TCPTimeWaitOverflow":
- procNetstat.TcpExt.TCPTimeWaitOverflow = value
+ procNetstat.TcpExt.TCPTimeWaitOverflow = &value
case "TCPReqQFullDoCookies":
- procNetstat.TcpExt.TCPReqQFullDoCookies = value
+ procNetstat.TcpExt.TCPReqQFullDoCookies = &value
case "TCPReqQFullDrop":
- procNetstat.TcpExt.TCPReqQFullDrop = value
+ procNetstat.TcpExt.TCPReqQFullDrop = &value
case "TCPRetransFail":
- procNetstat.TcpExt.TCPRetransFail = value
+ procNetstat.TcpExt.TCPRetransFail = &value
case "TCPRcvCoalesce":
- procNetstat.TcpExt.TCPRcvCoalesce = value
+ procNetstat.TcpExt.TCPRcvCoalesce = &value
+ case "TCPRcvQDrop":
+ procNetstat.TcpExt.TCPRcvQDrop = &value
case "TCPOFOQueue":
- procNetstat.TcpExt.TCPOFOQueue = value
+ procNetstat.TcpExt.TCPOFOQueue = &value
case "TCPOFODrop":
- procNetstat.TcpExt.TCPOFODrop = value
+ procNetstat.TcpExt.TCPOFODrop = &value
case "TCPOFOMerge":
- procNetstat.TcpExt.TCPOFOMerge = value
+ procNetstat.TcpExt.TCPOFOMerge = &value
case "TCPChallengeACK":
- procNetstat.TcpExt.TCPChallengeACK = value
+ procNetstat.TcpExt.TCPChallengeACK = &value
case "TCPSYNChallenge":
- procNetstat.TcpExt.TCPSYNChallenge = value
+ procNetstat.TcpExt.TCPSYNChallenge = &value
case "TCPFastOpenActive":
- procNetstat.TcpExt.TCPFastOpenActive = value
+ procNetstat.TcpExt.TCPFastOpenActive = &value
case "TCPFastOpenActiveFail":
- procNetstat.TcpExt.TCPFastOpenActiveFail = value
+ procNetstat.TcpExt.TCPFastOpenActiveFail = &value
case "TCPFastOpenPassive":
- procNetstat.TcpExt.TCPFastOpenPassive = value
+ procNetstat.TcpExt.TCPFastOpenPassive = &value
case "TCPFastOpenPassiveFail":
- procNetstat.TcpExt.TCPFastOpenPassiveFail = value
+ procNetstat.TcpExt.TCPFastOpenPassiveFail = &value
case "TCPFastOpenListenOverflow":
- procNetstat.TcpExt.TCPFastOpenListenOverflow = value
+ procNetstat.TcpExt.TCPFastOpenListenOverflow = &value
case "TCPFastOpenCookieReqd":
- procNetstat.TcpExt.TCPFastOpenCookieReqd = value
+ procNetstat.TcpExt.TCPFastOpenCookieReqd = &value
case "TCPFastOpenBlackhole":
- procNetstat.TcpExt.TCPFastOpenBlackhole = value
+ procNetstat.TcpExt.TCPFastOpenBlackhole = &value
case "TCPSpuriousRtxHostQueues":
- procNetstat.TcpExt.TCPSpuriousRtxHostQueues = value
+ procNetstat.TcpExt.TCPSpuriousRtxHostQueues = &value
case "BusyPollRxPackets":
- procNetstat.TcpExt.BusyPollRxPackets = value
+ procNetstat.TcpExt.BusyPollRxPackets = &value
case "TCPAutoCorking":
- procNetstat.TcpExt.TCPAutoCorking = value
+ procNetstat.TcpExt.TCPAutoCorking = &value
case "TCPFromZeroWindowAdv":
- procNetstat.TcpExt.TCPFromZeroWindowAdv = value
+ procNetstat.TcpExt.TCPFromZeroWindowAdv = &value
case "TCPToZeroWindowAdv":
- procNetstat.TcpExt.TCPToZeroWindowAdv = value
+ procNetstat.TcpExt.TCPToZeroWindowAdv = &value
case "TCPWantZeroWindowAdv":
- procNetstat.TcpExt.TCPWantZeroWindowAdv = value
+ procNetstat.TcpExt.TCPWantZeroWindowAdv = &value
case "TCPSynRetrans":
- procNetstat.TcpExt.TCPSynRetrans = value
+ procNetstat.TcpExt.TCPSynRetrans = &value
case "TCPOrigDataSent":
- procNetstat.TcpExt.TCPOrigDataSent = value
+ procNetstat.TcpExt.TCPOrigDataSent = &value
case "TCPHystartTrainDetect":
- procNetstat.TcpExt.TCPHystartTrainDetect = value
+ procNetstat.TcpExt.TCPHystartTrainDetect = &value
case "TCPHystartTrainCwnd":
- procNetstat.TcpExt.TCPHystartTrainCwnd = value
+ procNetstat.TcpExt.TCPHystartTrainCwnd = &value
case "TCPHystartDelayDetect":
- procNetstat.TcpExt.TCPHystartDelayDetect = value
+ procNetstat.TcpExt.TCPHystartDelayDetect = &value
case "TCPHystartDelayCwnd":
- procNetstat.TcpExt.TCPHystartDelayCwnd = value
+ procNetstat.TcpExt.TCPHystartDelayCwnd = &value
case "TCPACKSkippedSynRecv":
- procNetstat.TcpExt.TCPACKSkippedSynRecv = value
+ procNetstat.TcpExt.TCPACKSkippedSynRecv = &value
case "TCPACKSkippedPAWS":
- procNetstat.TcpExt.TCPACKSkippedPAWS = value
+ procNetstat.TcpExt.TCPACKSkippedPAWS = &value
case "TCPACKSkippedSeq":
- procNetstat.TcpExt.TCPACKSkippedSeq = value
+ procNetstat.TcpExt.TCPACKSkippedSeq = &value
case "TCPACKSkippedFinWait2":
- procNetstat.TcpExt.TCPACKSkippedFinWait2 = value
+ procNetstat.TcpExt.TCPACKSkippedFinWait2 = &value
case "TCPACKSkippedTimeWait":
- procNetstat.TcpExt.TCPACKSkippedTimeWait = value
+ procNetstat.TcpExt.TCPACKSkippedTimeWait = &value
case "TCPACKSkippedChallenge":
- procNetstat.TcpExt.TCPACKSkippedChallenge = value
+ procNetstat.TcpExt.TCPACKSkippedChallenge = &value
case "TCPWinProbe":
- procNetstat.TcpExt.TCPWinProbe = value
+ procNetstat.TcpExt.TCPWinProbe = &value
case "TCPKeepAlive":
- procNetstat.TcpExt.TCPKeepAlive = value
+ procNetstat.TcpExt.TCPKeepAlive = &value
case "TCPMTUPFail":
- procNetstat.TcpExt.TCPMTUPFail = value
+ procNetstat.TcpExt.TCPMTUPFail = &value
case "TCPMTUPSuccess":
- procNetstat.TcpExt.TCPMTUPSuccess = value
+ procNetstat.TcpExt.TCPMTUPSuccess = &value
case "TCPWqueueTooBig":
- procNetstat.TcpExt.TCPWqueueTooBig = value
+ procNetstat.TcpExt.TCPWqueueTooBig = &value
}
case "IpExt":
switch key {
case "InNoRoutes":
- procNetstat.IpExt.InNoRoutes = value
+ procNetstat.IpExt.InNoRoutes = &value
case "InTruncatedPkts":
- procNetstat.IpExt.InTruncatedPkts = value
+ procNetstat.IpExt.InTruncatedPkts = &value
case "InMcastPkts":
- procNetstat.IpExt.InMcastPkts = value
+ procNetstat.IpExt.InMcastPkts = &value
case "OutMcastPkts":
- procNetstat.IpExt.OutMcastPkts = value
+ procNetstat.IpExt.OutMcastPkts = &value
case "InBcastPkts":
- procNetstat.IpExt.InBcastPkts = value
+ procNetstat.IpExt.InBcastPkts = &value
case "OutBcastPkts":
- procNetstat.IpExt.OutBcastPkts = value
+ procNetstat.IpExt.OutBcastPkts = &value
case "InOctets":
- procNetstat.IpExt.InOctets = value
+ procNetstat.IpExt.InOctets = &value
case "OutOctets":
- procNetstat.IpExt.OutOctets = value
+ procNetstat.IpExt.OutOctets = &value
case "InMcastOctets":
- procNetstat.IpExt.InMcastOctets = value
+ procNetstat.IpExt.InMcastOctets = &value
case "OutMcastOctets":
- procNetstat.IpExt.OutMcastOctets = value
+ procNetstat.IpExt.OutMcastOctets = &value
case "InBcastOctets":
- procNetstat.IpExt.InBcastOctets = value
+ procNetstat.IpExt.InBcastOctets = &value
case "OutBcastOctets":
- procNetstat.IpExt.OutBcastOctets = value
+ procNetstat.IpExt.OutBcastOctets = &value
case "InCsumErrors":
- procNetstat.IpExt.InCsumErrors = value
+ procNetstat.IpExt.InCsumErrors = &value
case "InNoECTPkts":
- procNetstat.IpExt.InNoECTPkts = value
+ procNetstat.IpExt.InNoECTPkts = &value
case "InECT1Pkts":
- procNetstat.IpExt.InECT1Pkts = value
+ procNetstat.IpExt.InECT1Pkts = &value
case "InECT0Pkts":
- procNetstat.IpExt.InECT0Pkts = value
+ procNetstat.IpExt.InECT0Pkts = &value
case "InCEPkts":
- procNetstat.IpExt.InCEPkts = value
+ procNetstat.IpExt.InCEPkts = &value
case "ReasmOverlaps":
- procNetstat.IpExt.ReasmOverlaps = value
+ procNetstat.IpExt.ReasmOverlaps = &value
}
}
}
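Switching every counter to *float64 lets callers distinguish a metric the kernel never exported (nil pointer) from one that is present with value zero, at the cost of a nil check per field. A minimal sketch:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	stat, err := p.Netstat()
	if err != nil {
		log.Fatal(err)
	}
	// nil means the counter was absent from /proc/self/net/netstat,
	// which is now distinguishable from a counter that reads zero.
	if v := stat.TcpExt.TCPTimeouts; v != nil {
		fmt.Printf("TCPTimeouts: %.0f\n", *v)
	} else {
		fmt.Println("TCPTimeouts not exported by this kernel")
	}
}
```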
diff --git a/vendor/github.com/prometheus/procfs/proc_ns.go b/vendor/github.com/prometheus/procfs/proc_ns.go
index 391b4cbd11b..0f8f847f954 100644
--- a/vendor/github.com/prometheus/procfs/proc_ns.go
+++ b/vendor/github.com/prometheus/procfs/proc_ns.go
@@ -40,7 +40,7 @@ func (p Proc) Namespaces() (Namespaces, error) {
names, err := d.Readdirnames(-1)
if err != nil {
- return nil, fmt.Errorf("failed to read contents of ns dir: %w", err)
+ return nil, fmt.Errorf("%w: failed to read contents of ns dir: %w", ErrFileRead, err)
}
ns := make(Namespaces, len(names))
@@ -52,13 +52,13 @@ func (p Proc) Namespaces() (Namespaces, error) {
fields := strings.SplitN(target, ":", 2)
if len(fields) != 2 {
- return nil, fmt.Errorf("failed to parse namespace type and inode from %q", target)
+ return nil, fmt.Errorf("%w: namespace type and inode from %q", ErrFileParse, target)
}
typ := fields[0]
inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32)
if err != nil {
- return nil, fmt.Errorf("failed to parse inode from %q: %w", fields[1], err)
+ return nil, fmt.Errorf("%w: inode from %q: %w", ErrFileParse, fields[1], err)
}
ns[name] = Namespace{typ, uint32(inode)}
diff --git a/vendor/github.com/prometheus/procfs/proc_psi.go b/vendor/github.com/prometheus/procfs/proc_psi.go
index a68fe15290a..ccd35f153a0 100644
--- a/vendor/github.com/prometheus/procfs/proc_psi.go
+++ b/vendor/github.com/prometheus/procfs/proc_psi.go
@@ -61,14 +61,14 @@ type PSIStats struct {
func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) {
data, err := util.ReadFileNoStat(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource)))
if err != nil {
- return PSIStats{}, fmt.Errorf("psi_stats: unavailable for %q: %w", resource, err)
+ return PSIStats{}, fmt.Errorf("%w: psi_stats: unavailable for %q: %w", ErrFileRead, resource, err)
}
- return parsePSIStats(resource, bytes.NewReader(data))
+ return parsePSIStats(bytes.NewReader(data))
}
// parsePSIStats parses the specified file for pressure stall information.
-func parsePSIStats(resource string, r io.Reader) (PSIStats, error) {
+func parsePSIStats(r io.Reader) (PSIStats, error) {
psiStats := PSIStats{}
scanner := bufio.NewScanner(r)
diff --git a/vendor/github.com/prometheus/procfs/proc_smaps.go b/vendor/github.com/prometheus/procfs/proc_smaps.go
index 0e97d99575e..09060e82080 100644
--- a/vendor/github.com/prometheus/procfs/proc_smaps.go
+++ b/vendor/github.com/prometheus/procfs/proc_smaps.go
@@ -127,7 +127,7 @@ func (s *ProcSMapsRollup) parseLine(line string) error {
}
v := strings.TrimSpace(kv[1])
- v = strings.TrimRight(v, " kB")
+ v = strings.TrimSuffix(v, " kB")
vKBytes, err := strconv.ParseUint(v, 10, 64)
if err != nil {
@@ -135,12 +135,12 @@ func (s *ProcSMapsRollup) parseLine(line string) error {
}
vBytes := vKBytes * 1024
- s.addValue(k, v, vKBytes, vBytes)
+ s.addValue(k, vBytes)
return nil
}
-func (s *ProcSMapsRollup) addValue(k string, vString string, vUint uint64, vUintBytes uint64) {
+func (s *ProcSMapsRollup) addValue(k string, vUintBytes uint64) {
switch k {
case "Rss":
s.Rss += vUintBytes
diff --git a/vendor/github.com/prometheus/procfs/proc_snmp.go b/vendor/github.com/prometheus/procfs/proc_snmp.go
index ae191896cbd..b9d2cf642a7 100644
--- a/vendor/github.com/prometheus/procfs/proc_snmp.go
+++ b/vendor/github.com/prometheus/procfs/proc_snmp.go
@@ -37,100 +37,100 @@ type ProcSnmp struct {
}
type Ip struct { // nolint:revive
- Forwarding float64
- DefaultTTL float64
- InReceives float64
- InHdrErrors float64
- InAddrErrors float64
- ForwDatagrams float64
- InUnknownProtos float64
- InDiscards float64
- InDelivers float64
- OutRequests float64
- OutDiscards float64
- OutNoRoutes float64
- ReasmTimeout float64
- ReasmReqds float64
- ReasmOKs float64
- ReasmFails float64
- FragOKs float64
- FragFails float64
- FragCreates float64
+ Forwarding *float64
+ DefaultTTL *float64
+ InReceives *float64
+ InHdrErrors *float64
+ InAddrErrors *float64
+ ForwDatagrams *float64
+ InUnknownProtos *float64
+ InDiscards *float64
+ InDelivers *float64
+ OutRequests *float64
+ OutDiscards *float64
+ OutNoRoutes *float64
+ ReasmTimeout *float64
+ ReasmReqds *float64
+ ReasmOKs *float64
+ ReasmFails *float64
+ FragOKs *float64
+ FragFails *float64
+ FragCreates *float64
}
-type Icmp struct {
- InMsgs float64
- InErrors float64
- InCsumErrors float64
- InDestUnreachs float64
- InTimeExcds float64
- InParmProbs float64
- InSrcQuenchs float64
- InRedirects float64
- InEchos float64
- InEchoReps float64
- InTimestamps float64
- InTimestampReps float64
- InAddrMasks float64
- InAddrMaskReps float64
- OutMsgs float64
- OutErrors float64
- OutDestUnreachs float64
- OutTimeExcds float64
- OutParmProbs float64
- OutSrcQuenchs float64
- OutRedirects float64
- OutEchos float64
- OutEchoReps float64
- OutTimestamps float64
- OutTimestampReps float64
- OutAddrMasks float64
- OutAddrMaskReps float64
+type Icmp struct { // nolint:revive
+ InMsgs *float64
+ InErrors *float64
+ InCsumErrors *float64
+ InDestUnreachs *float64
+ InTimeExcds *float64
+ InParmProbs *float64
+ InSrcQuenchs *float64
+ InRedirects *float64
+ InEchos *float64
+ InEchoReps *float64
+ InTimestamps *float64
+ InTimestampReps *float64
+ InAddrMasks *float64
+ InAddrMaskReps *float64
+ OutMsgs *float64
+ OutErrors *float64
+ OutDestUnreachs *float64
+ OutTimeExcds *float64
+ OutParmProbs *float64
+ OutSrcQuenchs *float64
+ OutRedirects *float64
+ OutEchos *float64
+ OutEchoReps *float64
+ OutTimestamps *float64
+ OutTimestampReps *float64
+ OutAddrMasks *float64
+ OutAddrMaskReps *float64
}
type IcmpMsg struct {
- InType3 float64
- OutType3 float64
+ InType3 *float64
+ OutType3 *float64
}
type Tcp struct { // nolint:revive
- RtoAlgorithm float64
- RtoMin float64
- RtoMax float64
- MaxConn float64
- ActiveOpens float64
- PassiveOpens float64
- AttemptFails float64
- EstabResets float64
- CurrEstab float64
- InSegs float64
- OutSegs float64
- RetransSegs float64
- InErrs float64
- OutRsts float64
- InCsumErrors float64
+ RtoAlgorithm *float64
+ RtoMin *float64
+ RtoMax *float64
+ MaxConn *float64
+ ActiveOpens *float64
+ PassiveOpens *float64
+ AttemptFails *float64
+ EstabResets *float64
+ CurrEstab *float64
+ InSegs *float64
+ OutSegs *float64
+ RetransSegs *float64
+ InErrs *float64
+ OutRsts *float64
+ InCsumErrors *float64
}
type Udp struct { // nolint:revive
- InDatagrams float64
- NoPorts float64
- InErrors float64
- OutDatagrams float64
- RcvbufErrors float64
- SndbufErrors float64
- InCsumErrors float64
- IgnoredMulti float64
+ InDatagrams *float64
+ NoPorts *float64
+ InErrors *float64
+ OutDatagrams *float64
+ RcvbufErrors *float64
+ SndbufErrors *float64
+ InCsumErrors *float64
+ IgnoredMulti *float64
}
type UdpLite struct { // nolint:revive
- InDatagrams float64
- NoPorts float64
- InErrors float64
- OutDatagrams float64
- RcvbufErrors float64
- SndbufErrors float64
- InCsumErrors float64
- IgnoredMulti float64
+ InDatagrams *float64
+ NoPorts *float64
+ InErrors *float64
+ OutDatagrams *float64
+ RcvbufErrors *float64
+ SndbufErrors *float64
+ InCsumErrors *float64
+ IgnoredMulti *float64
}
func (p Proc) Snmp() (ProcSnmp, error) {
@@ -159,8 +159,8 @@ func parseSnmp(r io.Reader, fileName string) (ProcSnmp, error) {
// Remove trailing :.
protocol := strings.TrimSuffix(nameParts[0], ":")
if len(nameParts) != len(valueParts) {
- return procSnmp, fmt.Errorf("mismatch field count mismatch in %s: %s",
- fileName, protocol)
+ return procSnmp, fmt.Errorf("%w: field count mismatch in %s: %s",
+ ErrFileParse, fileName, protocol)
}
for i := 1; i < len(nameParts); i++ {
value, err := strconv.ParseFloat(valueParts[i], 64)
@@ -173,178 +173,178 @@ func parseSnmp(r io.Reader, fileName string) (ProcSnmp, error) {
case "Ip":
switch key {
case "Forwarding":
- procSnmp.Ip.Forwarding = value
+ procSnmp.Ip.Forwarding = &value
case "DefaultTTL":
- procSnmp.Ip.DefaultTTL = value
+ procSnmp.Ip.DefaultTTL = &value
case "InReceives":
- procSnmp.Ip.InReceives = value
+ procSnmp.Ip.InReceives = &value
case "InHdrErrors":
- procSnmp.Ip.InHdrErrors = value
+ procSnmp.Ip.InHdrErrors = &value
case "InAddrErrors":
- procSnmp.Ip.InAddrErrors = value
+ procSnmp.Ip.InAddrErrors = &value
case "ForwDatagrams":
- procSnmp.Ip.ForwDatagrams = value
+ procSnmp.Ip.ForwDatagrams = &value
case "InUnknownProtos":
- procSnmp.Ip.InUnknownProtos = value
+ procSnmp.Ip.InUnknownProtos = &value
case "InDiscards":
- procSnmp.Ip.InDiscards = value
+ procSnmp.Ip.InDiscards = &value
case "InDelivers":
- procSnmp.Ip.InDelivers = value
+ procSnmp.Ip.InDelivers = &value
case "OutRequests":
- procSnmp.Ip.OutRequests = value
+ procSnmp.Ip.OutRequests = &value
case "OutDiscards":
- procSnmp.Ip.OutDiscards = value
+ procSnmp.Ip.OutDiscards = &value
case "OutNoRoutes":
- procSnmp.Ip.OutNoRoutes = value
+ procSnmp.Ip.OutNoRoutes = &value
case "ReasmTimeout":
- procSnmp.Ip.ReasmTimeout = value
+ procSnmp.Ip.ReasmTimeout = &value
case "ReasmReqds":
- procSnmp.Ip.ReasmReqds = value
+ procSnmp.Ip.ReasmReqds = &value
case "ReasmOKs":
- procSnmp.Ip.ReasmOKs = value
+ procSnmp.Ip.ReasmOKs = &value
case "ReasmFails":
- procSnmp.Ip.ReasmFails = value
+ procSnmp.Ip.ReasmFails = &value
case "FragOKs":
- procSnmp.Ip.FragOKs = value
+ procSnmp.Ip.FragOKs = &value
case "FragFails":
- procSnmp.Ip.FragFails = value
+ procSnmp.Ip.FragFails = &value
case "FragCreates":
- procSnmp.Ip.FragCreates = value
+ procSnmp.Ip.FragCreates = &value
}
case "Icmp":
switch key {
case "InMsgs":
- procSnmp.Icmp.InMsgs = value
+ procSnmp.Icmp.InMsgs = &value
case "InErrors":
- procSnmp.Icmp.InErrors = value
+ procSnmp.Icmp.InErrors = &value
case "InCsumErrors":
- procSnmp.Icmp.InCsumErrors = value
+ procSnmp.Icmp.InCsumErrors = &value
case "InDestUnreachs":
- procSnmp.Icmp.InDestUnreachs = value
+ procSnmp.Icmp.InDestUnreachs = &value
case "InTimeExcds":
- procSnmp.Icmp.InTimeExcds = value
+ procSnmp.Icmp.InTimeExcds = &value
case "InParmProbs":
- procSnmp.Icmp.InParmProbs = value
+ procSnmp.Icmp.InParmProbs = &value
case "InSrcQuenchs":
- procSnmp.Icmp.InSrcQuenchs = value
+ procSnmp.Icmp.InSrcQuenchs = &value
case "InRedirects":
- procSnmp.Icmp.InRedirects = value
+ procSnmp.Icmp.InRedirects = &value
case "InEchos":
- procSnmp.Icmp.InEchos = value
+ procSnmp.Icmp.InEchos = &value
case "InEchoReps":
- procSnmp.Icmp.InEchoReps = value
+ procSnmp.Icmp.InEchoReps = &value
case "InTimestamps":
- procSnmp.Icmp.InTimestamps = value
+ procSnmp.Icmp.InTimestamps = &value
case "InTimestampReps":
- procSnmp.Icmp.InTimestampReps = value
+ procSnmp.Icmp.InTimestampReps = &value
case "InAddrMasks":
- procSnmp.Icmp.InAddrMasks = value
+ procSnmp.Icmp.InAddrMasks = &value
case "InAddrMaskReps":
- procSnmp.Icmp.InAddrMaskReps = value
+ procSnmp.Icmp.InAddrMaskReps = &value
case "OutMsgs":
- procSnmp.Icmp.OutMsgs = value
+ procSnmp.Icmp.OutMsgs = &value
case "OutErrors":
- procSnmp.Icmp.OutErrors = value
+ procSnmp.Icmp.OutErrors = &value
case "OutDestUnreachs":
- procSnmp.Icmp.OutDestUnreachs = value
+ procSnmp.Icmp.OutDestUnreachs = &value
case "OutTimeExcds":
- procSnmp.Icmp.OutTimeExcds = value
+ procSnmp.Icmp.OutTimeExcds = &value
case "OutParmProbs":
- procSnmp.Icmp.OutParmProbs = value
+ procSnmp.Icmp.OutParmProbs = &value
case "OutSrcQuenchs":
- procSnmp.Icmp.OutSrcQuenchs = value
+ procSnmp.Icmp.OutSrcQuenchs = &value
case "OutRedirects":
- procSnmp.Icmp.OutRedirects = value
+ procSnmp.Icmp.OutRedirects = &value
case "OutEchos":
- procSnmp.Icmp.OutEchos = value
+ procSnmp.Icmp.OutEchos = &value
case "OutEchoReps":
- procSnmp.Icmp.OutEchoReps = value
+ procSnmp.Icmp.OutEchoReps = &value
case "OutTimestamps":
- procSnmp.Icmp.OutTimestamps = value
+ procSnmp.Icmp.OutTimestamps = &value
case "OutTimestampReps":
- procSnmp.Icmp.OutTimestampReps = value
+ procSnmp.Icmp.OutTimestampReps = &value
case "OutAddrMasks":
- procSnmp.Icmp.OutAddrMasks = value
+ procSnmp.Icmp.OutAddrMasks = &value
case "OutAddrMaskReps":
- procSnmp.Icmp.OutAddrMaskReps = value
+ procSnmp.Icmp.OutAddrMaskReps = &value
}
case "IcmpMsg":
switch key {
case "InType3":
- procSnmp.IcmpMsg.InType3 = value
+ procSnmp.IcmpMsg.InType3 = &value
case "OutType3":
- procSnmp.IcmpMsg.OutType3 = value
+ procSnmp.IcmpMsg.OutType3 = &value
}
case "Tcp":
switch key {
case "RtoAlgorithm":
- procSnmp.Tcp.RtoAlgorithm = value
+ procSnmp.Tcp.RtoAlgorithm = &value
case "RtoMin":
- procSnmp.Tcp.RtoMin = value
+ procSnmp.Tcp.RtoMin = &value
case "RtoMax":
- procSnmp.Tcp.RtoMax = value
+ procSnmp.Tcp.RtoMax = &value
case "MaxConn":
- procSnmp.Tcp.MaxConn = value
+ procSnmp.Tcp.MaxConn = &value
case "ActiveOpens":
- procSnmp.Tcp.ActiveOpens = value
+ procSnmp.Tcp.ActiveOpens = &value
case "PassiveOpens":
- procSnmp.Tcp.PassiveOpens = value
+ procSnmp.Tcp.PassiveOpens = &value
case "AttemptFails":
- procSnmp.Tcp.AttemptFails = value
+ procSnmp.Tcp.AttemptFails = &value
case "EstabResets":
- procSnmp.Tcp.EstabResets = value
+ procSnmp.Tcp.EstabResets = &value
case "CurrEstab":
- procSnmp.Tcp.CurrEstab = value
+ procSnmp.Tcp.CurrEstab = &value
case "InSegs":
- procSnmp.Tcp.InSegs = value
+ procSnmp.Tcp.InSegs = &value
case "OutSegs":
- procSnmp.Tcp.OutSegs = value
+ procSnmp.Tcp.OutSegs = &value
case "RetransSegs":
- procSnmp.Tcp.RetransSegs = value
+ procSnmp.Tcp.RetransSegs = &value
case "InErrs":
- procSnmp.Tcp.InErrs = value
+ procSnmp.Tcp.InErrs = &value
case "OutRsts":
- procSnmp.Tcp.OutRsts = value
+ procSnmp.Tcp.OutRsts = &value
case "InCsumErrors":
- procSnmp.Tcp.InCsumErrors = value
+ procSnmp.Tcp.InCsumErrors = &value
}
case "Udp":
switch key {
case "InDatagrams":
- procSnmp.Udp.InDatagrams = value
+ procSnmp.Udp.InDatagrams = &value
case "NoPorts":
- procSnmp.Udp.NoPorts = value
+ procSnmp.Udp.NoPorts = &value
case "InErrors":
- procSnmp.Udp.InErrors = value
+ procSnmp.Udp.InErrors = &value
case "OutDatagrams":
- procSnmp.Udp.OutDatagrams = value
+ procSnmp.Udp.OutDatagrams = &value
case "RcvbufErrors":
- procSnmp.Udp.RcvbufErrors = value
+ procSnmp.Udp.RcvbufErrors = &value
case "SndbufErrors":
- procSnmp.Udp.SndbufErrors = value
+ procSnmp.Udp.SndbufErrors = &value
case "InCsumErrors":
- procSnmp.Udp.InCsumErrors = value
+ procSnmp.Udp.InCsumErrors = &value
case "IgnoredMulti":
- procSnmp.Udp.IgnoredMulti = value
+ procSnmp.Udp.IgnoredMulti = &value
}
case "UdpLite":
switch key {
case "InDatagrams":
- procSnmp.UdpLite.InDatagrams = value
+ procSnmp.UdpLite.InDatagrams = &value
case "NoPorts":
- procSnmp.UdpLite.NoPorts = value
+ procSnmp.UdpLite.NoPorts = &value
case "InErrors":
- procSnmp.UdpLite.InErrors = value
+ procSnmp.UdpLite.InErrors = &value
case "OutDatagrams":
- procSnmp.UdpLite.OutDatagrams = value
+ procSnmp.UdpLite.OutDatagrams = &value
case "RcvbufErrors":
- procSnmp.UdpLite.RcvbufErrors = value
+ procSnmp.UdpLite.RcvbufErrors = &value
case "SndbufErrors":
- procSnmp.UdpLite.SndbufErrors = value
+ procSnmp.UdpLite.SndbufErrors = &value
case "InCsumErrors":
- procSnmp.UdpLite.InCsumErrors = value
+ procSnmp.UdpLite.InCsumErrors = &value
case "IgnoredMulti":
- procSnmp.UdpLite.IgnoredMulti = value
+ procSnmp.UdpLite.IgnoredMulti = &value
}
}
}
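The switch above now stores `&value` because the ProcSnmp counter fields become `*float64` in this change; a nil pointer lets callers tell a counter the kernel never reported apart from a reported zero. A minimal caller-side sketch of the new semantics (`procfs.Self` and `Proc.Snmp` are assumed from the procfs API, not shown in this hunk):

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	snmp, err := p.Snmp()
	if err != nil {
		log.Fatal(err)
	}
	// A nil pointer means the counter was absent from /proc/net/snmp,
	// which is now distinguishable from a reported value of 0.
	if v := snmp.Ip.InReceives; v != nil {
		fmt.Printf("IP InReceives: %.0f\n", *v)
	} else {
		fmt.Println("IP InReceives not reported by this kernel")
	}
}
```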
diff --git a/vendor/github.com/prometheus/procfs/proc_snmp6.go b/vendor/github.com/prometheus/procfs/proc_snmp6.go
index f611992d52c..3059cc6a136 100644
--- a/vendor/github.com/prometheus/procfs/proc_snmp6.go
+++ b/vendor/github.com/prometheus/procfs/proc_snmp6.go
@@ -36,106 +36,106 @@ type ProcSnmp6 struct {
}
type Ip6 struct { // nolint:revive
- InReceives float64
- InHdrErrors float64
- InTooBigErrors float64
- InNoRoutes float64
- InAddrErrors float64
- InUnknownProtos float64
- InTruncatedPkts float64
- InDiscards float64
- InDelivers float64
- OutForwDatagrams float64
- OutRequests float64
- OutDiscards float64
- OutNoRoutes float64
- ReasmTimeout float64
- ReasmReqds float64
- ReasmOKs float64
- ReasmFails float64
- FragOKs float64
- FragFails float64
- FragCreates float64
- InMcastPkts float64
- OutMcastPkts float64
- InOctets float64
- OutOctets float64
- InMcastOctets float64
- OutMcastOctets float64
- InBcastOctets float64
- OutBcastOctets float64
- InNoECTPkts float64
- InECT1Pkts float64
- InECT0Pkts float64
- InCEPkts float64
+ InReceives *float64
+ InHdrErrors *float64
+ InTooBigErrors *float64
+ InNoRoutes *float64
+ InAddrErrors *float64
+ InUnknownProtos *float64
+ InTruncatedPkts *float64
+ InDiscards *float64
+ InDelivers *float64
+ OutForwDatagrams *float64
+ OutRequests *float64
+ OutDiscards *float64
+ OutNoRoutes *float64
+ ReasmTimeout *float64
+ ReasmReqds *float64
+ ReasmOKs *float64
+ ReasmFails *float64
+ FragOKs *float64
+ FragFails *float64
+ FragCreates *float64
+ InMcastPkts *float64
+ OutMcastPkts *float64
+ InOctets *float64
+ OutOctets *float64
+ InMcastOctets *float64
+ OutMcastOctets *float64
+ InBcastOctets *float64
+ OutBcastOctets *float64
+ InNoECTPkts *float64
+ InECT1Pkts *float64
+ InECT0Pkts *float64
+ InCEPkts *float64
}
type Icmp6 struct {
- InMsgs float64
- InErrors float64
- OutMsgs float64
- OutErrors float64
- InCsumErrors float64
- InDestUnreachs float64
- InPktTooBigs float64
- InTimeExcds float64
- InParmProblems float64
- InEchos float64
- InEchoReplies float64
- InGroupMembQueries float64
- InGroupMembResponses float64
- InGroupMembReductions float64
- InRouterSolicits float64
- InRouterAdvertisements float64
- InNeighborSolicits float64
- InNeighborAdvertisements float64
- InRedirects float64
- InMLDv2Reports float64
- OutDestUnreachs float64
- OutPktTooBigs float64
- OutTimeExcds float64
- OutParmProblems float64
- OutEchos float64
- OutEchoReplies float64
- OutGroupMembQueries float64
- OutGroupMembResponses float64
- OutGroupMembReductions float64
- OutRouterSolicits float64
- OutRouterAdvertisements float64
- OutNeighborSolicits float64
- OutNeighborAdvertisements float64
- OutRedirects float64
- OutMLDv2Reports float64
- InType1 float64
- InType134 float64
- InType135 float64
- InType136 float64
- InType143 float64
- OutType133 float64
- OutType135 float64
- OutType136 float64
- OutType143 float64
+ InMsgs *float64
+ InErrors *float64
+ OutMsgs *float64
+ OutErrors *float64
+ InCsumErrors *float64
+ InDestUnreachs *float64
+ InPktTooBigs *float64
+ InTimeExcds *float64
+ InParmProblems *float64
+ InEchos *float64
+ InEchoReplies *float64
+ InGroupMembQueries *float64
+ InGroupMembResponses *float64
+ InGroupMembReductions *float64
+ InRouterSolicits *float64
+ InRouterAdvertisements *float64
+ InNeighborSolicits *float64
+ InNeighborAdvertisements *float64
+ InRedirects *float64
+ InMLDv2Reports *float64
+ OutDestUnreachs *float64
+ OutPktTooBigs *float64
+ OutTimeExcds *float64
+ OutParmProblems *float64
+ OutEchos *float64
+ OutEchoReplies *float64
+ OutGroupMembQueries *float64
+ OutGroupMembResponses *float64
+ OutGroupMembReductions *float64
+ OutRouterSolicits *float64
+ OutRouterAdvertisements *float64
+ OutNeighborSolicits *float64
+ OutNeighborAdvertisements *float64
+ OutRedirects *float64
+ OutMLDv2Reports *float64
+ InType1 *float64
+ InType134 *float64
+ InType135 *float64
+ InType136 *float64
+ InType143 *float64
+ OutType133 *float64
+ OutType135 *float64
+ OutType136 *float64
+ OutType143 *float64
}
type Udp6 struct { // nolint:revive
- InDatagrams float64
- NoPorts float64
- InErrors float64
- OutDatagrams float64
- RcvbufErrors float64
- SndbufErrors float64
- InCsumErrors float64
- IgnoredMulti float64
+ InDatagrams *float64
+ NoPorts *float64
+ InErrors *float64
+ OutDatagrams *float64
+ RcvbufErrors *float64
+ SndbufErrors *float64
+ InCsumErrors *float64
+ IgnoredMulti *float64
}
type UdpLite6 struct { // nolint:revive
- InDatagrams float64
- NoPorts float64
- InErrors float64
- OutDatagrams float64
- RcvbufErrors float64
- SndbufErrors float64
- InCsumErrors float64
+ InDatagrams *float64
+ NoPorts *float64
+ InErrors *float64
+ OutDatagrams *float64
+ RcvbufErrors *float64
+ SndbufErrors *float64
+ InCsumErrors *float64
}
func (p Proc) Snmp6() (ProcSnmp6, error) {
@@ -182,197 +182,197 @@ func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) {
case "Ip6":
switch key {
case "InReceives":
- procSnmp6.Ip6.InReceives = value
+ procSnmp6.Ip6.InReceives = &value
case "InHdrErrors":
- procSnmp6.Ip6.InHdrErrors = value
+ procSnmp6.Ip6.InHdrErrors = &value
case "InTooBigErrors":
- procSnmp6.Ip6.InTooBigErrors = value
+ procSnmp6.Ip6.InTooBigErrors = &value
case "InNoRoutes":
- procSnmp6.Ip6.InNoRoutes = value
+ procSnmp6.Ip6.InNoRoutes = &value
case "InAddrErrors":
- procSnmp6.Ip6.InAddrErrors = value
+ procSnmp6.Ip6.InAddrErrors = &value
case "InUnknownProtos":
- procSnmp6.Ip6.InUnknownProtos = value
+ procSnmp6.Ip6.InUnknownProtos = &value
case "InTruncatedPkts":
- procSnmp6.Ip6.InTruncatedPkts = value
+ procSnmp6.Ip6.InTruncatedPkts = &value
case "InDiscards":
- procSnmp6.Ip6.InDiscards = value
+ procSnmp6.Ip6.InDiscards = &value
case "InDelivers":
- procSnmp6.Ip6.InDelivers = value
+ procSnmp6.Ip6.InDelivers = &value
case "OutForwDatagrams":
- procSnmp6.Ip6.OutForwDatagrams = value
+ procSnmp6.Ip6.OutForwDatagrams = &value
case "OutRequests":
- procSnmp6.Ip6.OutRequests = value
+ procSnmp6.Ip6.OutRequests = &value
case "OutDiscards":
- procSnmp6.Ip6.OutDiscards = value
+ procSnmp6.Ip6.OutDiscards = &value
case "OutNoRoutes":
- procSnmp6.Ip6.OutNoRoutes = value
+ procSnmp6.Ip6.OutNoRoutes = &value
case "ReasmTimeout":
- procSnmp6.Ip6.ReasmTimeout = value
+ procSnmp6.Ip6.ReasmTimeout = &value
case "ReasmReqds":
- procSnmp6.Ip6.ReasmReqds = value
+ procSnmp6.Ip6.ReasmReqds = &value
case "ReasmOKs":
- procSnmp6.Ip6.ReasmOKs = value
+ procSnmp6.Ip6.ReasmOKs = &value
case "ReasmFails":
- procSnmp6.Ip6.ReasmFails = value
+ procSnmp6.Ip6.ReasmFails = &value
case "FragOKs":
- procSnmp6.Ip6.FragOKs = value
+ procSnmp6.Ip6.FragOKs = &value
case "FragFails":
- procSnmp6.Ip6.FragFails = value
+ procSnmp6.Ip6.FragFails = &value
case "FragCreates":
- procSnmp6.Ip6.FragCreates = value
+ procSnmp6.Ip6.FragCreates = &value
case "InMcastPkts":
- procSnmp6.Ip6.InMcastPkts = value
+ procSnmp6.Ip6.InMcastPkts = &value
case "OutMcastPkts":
- procSnmp6.Ip6.OutMcastPkts = value
+ procSnmp6.Ip6.OutMcastPkts = &value
case "InOctets":
- procSnmp6.Ip6.InOctets = value
+ procSnmp6.Ip6.InOctets = &value
case "OutOctets":
- procSnmp6.Ip6.OutOctets = value
+ procSnmp6.Ip6.OutOctets = &value
case "InMcastOctets":
- procSnmp6.Ip6.InMcastOctets = value
+ procSnmp6.Ip6.InMcastOctets = &value
case "OutMcastOctets":
- procSnmp6.Ip6.OutMcastOctets = value
+ procSnmp6.Ip6.OutMcastOctets = &value
case "InBcastOctets":
- procSnmp6.Ip6.InBcastOctets = value
+ procSnmp6.Ip6.InBcastOctets = &value
case "OutBcastOctets":
- procSnmp6.Ip6.OutBcastOctets = value
+ procSnmp6.Ip6.OutBcastOctets = &value
case "InNoECTPkts":
- procSnmp6.Ip6.InNoECTPkts = value
+ procSnmp6.Ip6.InNoECTPkts = &value
case "InECT1Pkts":
- procSnmp6.Ip6.InECT1Pkts = value
+ procSnmp6.Ip6.InECT1Pkts = &value
case "InECT0Pkts":
- procSnmp6.Ip6.InECT0Pkts = value
+ procSnmp6.Ip6.InECT0Pkts = &value
case "InCEPkts":
- procSnmp6.Ip6.InCEPkts = value
+ procSnmp6.Ip6.InCEPkts = &value
}
case "Icmp6":
switch key {
case "InMsgs":
- procSnmp6.Icmp6.InMsgs = value
+ procSnmp6.Icmp6.InMsgs = &value
case "InErrors":
- procSnmp6.Icmp6.InErrors = value
+ procSnmp6.Icmp6.InErrors = &value
case "OutMsgs":
- procSnmp6.Icmp6.OutMsgs = value
+ procSnmp6.Icmp6.OutMsgs = &value
case "OutErrors":
- procSnmp6.Icmp6.OutErrors = value
+ procSnmp6.Icmp6.OutErrors = &value
case "InCsumErrors":
- procSnmp6.Icmp6.InCsumErrors = value
+ procSnmp6.Icmp6.InCsumErrors = &value
case "InDestUnreachs":
- procSnmp6.Icmp6.InDestUnreachs = value
+ procSnmp6.Icmp6.InDestUnreachs = &value
case "InPktTooBigs":
- procSnmp6.Icmp6.InPktTooBigs = value
+ procSnmp6.Icmp6.InPktTooBigs = &value
case "InTimeExcds":
- procSnmp6.Icmp6.InTimeExcds = value
+ procSnmp6.Icmp6.InTimeExcds = &value
case "InParmProblems":
- procSnmp6.Icmp6.InParmProblems = value
+ procSnmp6.Icmp6.InParmProblems = &value
case "InEchos":
- procSnmp6.Icmp6.InEchos = value
+ procSnmp6.Icmp6.InEchos = &value
case "InEchoReplies":
- procSnmp6.Icmp6.InEchoReplies = value
+ procSnmp6.Icmp6.InEchoReplies = &value
case "InGroupMembQueries":
- procSnmp6.Icmp6.InGroupMembQueries = value
+ procSnmp6.Icmp6.InGroupMembQueries = &value
case "InGroupMembResponses":
- procSnmp6.Icmp6.InGroupMembResponses = value
+ procSnmp6.Icmp6.InGroupMembResponses = &value
case "InGroupMembReductions":
- procSnmp6.Icmp6.InGroupMembReductions = value
+ procSnmp6.Icmp6.InGroupMembReductions = &value
case "InRouterSolicits":
- procSnmp6.Icmp6.InRouterSolicits = value
+ procSnmp6.Icmp6.InRouterSolicits = &value
case "InRouterAdvertisements":
- procSnmp6.Icmp6.InRouterAdvertisements = value
+ procSnmp6.Icmp6.InRouterAdvertisements = &value
case "InNeighborSolicits":
- procSnmp6.Icmp6.InNeighborSolicits = value
+ procSnmp6.Icmp6.InNeighborSolicits = &value
case "InNeighborAdvertisements":
- procSnmp6.Icmp6.InNeighborAdvertisements = value
+ procSnmp6.Icmp6.InNeighborAdvertisements = &value
case "InRedirects":
- procSnmp6.Icmp6.InRedirects = value
+ procSnmp6.Icmp6.InRedirects = &value
case "InMLDv2Reports":
- procSnmp6.Icmp6.InMLDv2Reports = value
+ procSnmp6.Icmp6.InMLDv2Reports = &value
case "OutDestUnreachs":
- procSnmp6.Icmp6.OutDestUnreachs = value
+ procSnmp6.Icmp6.OutDestUnreachs = &value
case "OutPktTooBigs":
- procSnmp6.Icmp6.OutPktTooBigs = value
+ procSnmp6.Icmp6.OutPktTooBigs = &value
case "OutTimeExcds":
- procSnmp6.Icmp6.OutTimeExcds = value
+ procSnmp6.Icmp6.OutTimeExcds = &value
case "OutParmProblems":
- procSnmp6.Icmp6.OutParmProblems = value
+ procSnmp6.Icmp6.OutParmProblems = &value
case "OutEchos":
- procSnmp6.Icmp6.OutEchos = value
+ procSnmp6.Icmp6.OutEchos = &value
case "OutEchoReplies":
- procSnmp6.Icmp6.OutEchoReplies = value
+ procSnmp6.Icmp6.OutEchoReplies = &value
case "OutGroupMembQueries":
- procSnmp6.Icmp6.OutGroupMembQueries = value
+ procSnmp6.Icmp6.OutGroupMembQueries = &value
case "OutGroupMembResponses":
- procSnmp6.Icmp6.OutGroupMembResponses = value
+ procSnmp6.Icmp6.OutGroupMembResponses = &value
case "OutGroupMembReductions":
- procSnmp6.Icmp6.OutGroupMembReductions = value
+ procSnmp6.Icmp6.OutGroupMembReductions = &value
case "OutRouterSolicits":
- procSnmp6.Icmp6.OutRouterSolicits = value
+ procSnmp6.Icmp6.OutRouterSolicits = &value
case "OutRouterAdvertisements":
- procSnmp6.Icmp6.OutRouterAdvertisements = value
+ procSnmp6.Icmp6.OutRouterAdvertisements = &value
case "OutNeighborSolicits":
- procSnmp6.Icmp6.OutNeighborSolicits = value
+ procSnmp6.Icmp6.OutNeighborSolicits = &value
case "OutNeighborAdvertisements":
- procSnmp6.Icmp6.OutNeighborAdvertisements = value
+ procSnmp6.Icmp6.OutNeighborAdvertisements = &value
case "OutRedirects":
- procSnmp6.Icmp6.OutRedirects = value
+ procSnmp6.Icmp6.OutRedirects = &value
case "OutMLDv2Reports":
- procSnmp6.Icmp6.OutMLDv2Reports = value
+ procSnmp6.Icmp6.OutMLDv2Reports = &value
case "InType1":
- procSnmp6.Icmp6.InType1 = value
+ procSnmp6.Icmp6.InType1 = &value
case "InType134":
- procSnmp6.Icmp6.InType134 = value
+ procSnmp6.Icmp6.InType134 = &value
case "InType135":
- procSnmp6.Icmp6.InType135 = value
+ procSnmp6.Icmp6.InType135 = &value
case "InType136":
- procSnmp6.Icmp6.InType136 = value
+ procSnmp6.Icmp6.InType136 = &value
case "InType143":
- procSnmp6.Icmp6.InType143 = value
+ procSnmp6.Icmp6.InType143 = &value
case "OutType133":
- procSnmp6.Icmp6.OutType133 = value
+ procSnmp6.Icmp6.OutType133 = &value
case "OutType135":
- procSnmp6.Icmp6.OutType135 = value
+ procSnmp6.Icmp6.OutType135 = &value
case "OutType136":
- procSnmp6.Icmp6.OutType136 = value
+ procSnmp6.Icmp6.OutType136 = &value
case "OutType143":
- procSnmp6.Icmp6.OutType143 = value
+ procSnmp6.Icmp6.OutType143 = &value
}
case "Udp6":
switch key {
case "InDatagrams":
- procSnmp6.Udp6.InDatagrams = value
+ procSnmp6.Udp6.InDatagrams = &value
case "NoPorts":
- procSnmp6.Udp6.NoPorts = value
+ procSnmp6.Udp6.NoPorts = &value
case "InErrors":
- procSnmp6.Udp6.InErrors = value
+ procSnmp6.Udp6.InErrors = &value
case "OutDatagrams":
- procSnmp6.Udp6.OutDatagrams = value
+ procSnmp6.Udp6.OutDatagrams = &value
case "RcvbufErrors":
- procSnmp6.Udp6.RcvbufErrors = value
+ procSnmp6.Udp6.RcvbufErrors = &value
case "SndbufErrors":
- procSnmp6.Udp6.SndbufErrors = value
+ procSnmp6.Udp6.SndbufErrors = &value
case "InCsumErrors":
- procSnmp6.Udp6.InCsumErrors = value
+ procSnmp6.Udp6.InCsumErrors = &value
case "IgnoredMulti":
- procSnmp6.Udp6.IgnoredMulti = value
+ procSnmp6.Udp6.IgnoredMulti = &value
}
case "UdpLite6":
switch key {
case "InDatagrams":
- procSnmp6.UdpLite6.InDatagrams = value
+ procSnmp6.UdpLite6.InDatagrams = &value
case "NoPorts":
- procSnmp6.UdpLite6.NoPorts = value
+ procSnmp6.UdpLite6.NoPorts = &value
case "InErrors":
- procSnmp6.UdpLite6.InErrors = value
+ procSnmp6.UdpLite6.InErrors = &value
case "OutDatagrams":
- procSnmp6.UdpLite6.OutDatagrams = value
+ procSnmp6.UdpLite6.OutDatagrams = &value
case "RcvbufErrors":
- procSnmp6.UdpLite6.RcvbufErrors = value
+ procSnmp6.UdpLite6.RcvbufErrors = &value
case "SndbufErrors":
- procSnmp6.UdpLite6.SndbufErrors = value
+ procSnmp6.UdpLite6.SndbufErrors = &value
case "InCsumErrors":
- procSnmp6.UdpLite6.InCsumErrors = value
+ procSnmp6.UdpLite6.InCsumErrors = &value
}
}
}
diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go
index 06c556ef962..06a8d931c98 100644
--- a/vendor/github.com/prometheus/procfs/proc_stat.go
+++ b/vendor/github.com/prometheus/procfs/proc_stat.go
@@ -18,7 +18,6 @@ import (
"fmt"
"os"
- "github.com/prometheus/procfs/internal/fs"
"github.com/prometheus/procfs/internal/util"
)
@@ -102,6 +101,8 @@ type ProcStat struct {
RSS int
// Soft limit in bytes on the rss of the process.
RSSLimit uint64
+ // The CPU number the process last executed on.
+ Processor uint
// Real-time scheduling priority, a number in the range 1 to 99 for processes
// scheduled under a real-time policy, or 0, for non-real-time processes.
RTPriority uint
@@ -109,8 +110,13 @@ type ProcStat struct {
Policy uint
// Aggregated block I/O delays, measured in clock ticks (centiseconds).
DelayAcctBlkIOTicks uint64
+ // Guest time of the process (time spent running a virtual CPU for a guest
+ // operating system), measured in clock ticks.
+ GuestTime int
+ // Guest time of the process's children, measured in clock ticks.
+ CGuestTime int
- proc fs.FS
+ proc FS
}
// NewStat returns the current status information of the process.
@@ -137,7 +143,7 @@ func (p Proc) Stat() (ProcStat, error) {
)
if l < 0 || r < 0 {
- return ProcStat{}, fmt.Errorf("unexpected format, couldn't extract comm %q", data)
+ return ProcStat{}, fmt.Errorf("%w: unexpected format, couldn't extract comm %q", ErrFileParse, data)
}
s.Comm = string(data[l+1 : r])
@@ -184,10 +190,12 @@ func (p Proc) Stat() (ProcStat, error) {
&ignoreUint64,
&ignoreUint64,
&ignoreInt64,
- &ignoreInt64,
+ &s.Processor,
&s.RTPriority,
&s.Policy,
&s.DelayAcctBlkIOTicks,
+ &s.GuestTime,
+ &s.CGuestTime,
)
if err != nil {
return ProcStat{}, err
@@ -208,8 +216,7 @@ func (s ProcStat) ResidentMemory() int {
// StartTime returns the unix timestamp of the process in seconds.
func (s ProcStat) StartTime() (float64, error) {
- fs := FS{proc: s.proc}
- stat, err := fs.Stat()
+ stat, err := s.proc.Stat()
if err != nil {
return 0, err
}
diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go
index 594022ded48..a055197c63e 100644
--- a/vendor/github.com/prometheus/procfs/proc_status.go
+++ b/vendor/github.com/prometheus/procfs/proc_status.go
@@ -15,6 +15,8 @@ package procfs
import (
"bytes"
+ "math/bits"
+ "sort"
"strconv"
"strings"
@@ -22,7 +24,7 @@ import (
)
// ProcStatus provides status information about the process,
-// read from /proc/[pid]/stat.
+// read from /proc/[pid]/status.
type ProcStatus struct {
// The process ID.
PID int
@@ -31,6 +33,8 @@ type ProcStatus struct {
// Thread group ID.
TGID int
+ // The process's PID in each of its PID namespaces.
+ NSpids []uint64
// Peak virtual memory size.
VmPeak uint64 // nolint:revive
@@ -73,9 +77,12 @@ type ProcStatus struct {
NonVoluntaryCtxtSwitches uint64
// UIDs of the process (Real, effective, saved set, and filesystem UIDs)
- UIDs [4]string
+ UIDs [4]uint64
// GIDs of the process (Real, effective, saved set, and filesystem GIDs)
- GIDs [4]string
+ GIDs [4]uint64
+
+ // CpusAllowedList: list of CPU cores the process is allowed to run on.
+ CpusAllowedList []uint64
}
// NewStatus returns the current status information of the process.
@@ -96,10 +103,10 @@ func (p Proc) NewStatus() (ProcStatus, error) {
kv := strings.SplitN(line, ":", 2)
// removes spaces
- k := string(strings.TrimSpace(kv[0]))
- v := string(strings.TrimSpace(kv[1]))
+ k := strings.TrimSpace(kv[0])
+ v := strings.TrimSpace(kv[1])
// removes "kB"
- v = string(bytes.Trim([]byte(v), " kB"))
+ v = strings.TrimSuffix(v, " kB")
// value to int when possible
// we can skip error check here, 'cause vKBytes is not used when value is a string
@@ -107,22 +114,39 @@ func (p Proc) NewStatus() (ProcStatus, error) {
// convert kB to B
vBytes := vKBytes * 1024
- s.fillStatus(k, v, vKBytes, vBytes)
+ err = s.fillStatus(k, v, vKBytes, vBytes)
+ if err != nil {
+ return ProcStatus{}, err
+ }
}
return s, nil
}
-func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) {
+func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) error {
switch k {
case "Tgid":
s.TGID = int(vUint)
case "Name":
s.Name = vString
case "Uid":
- copy(s.UIDs[:], strings.Split(vString, "\t"))
+ var err error
+ for i, v := range strings.Split(vString, "\t") {
+ s.UIDs[i], err = strconv.ParseUint(v, 10, bits.UintSize)
+ if err != nil {
+ return err
+ }
+ }
case "Gid":
- copy(s.GIDs[:], strings.Split(vString, "\t"))
+ var err error
+ for i, v := range strings.Split(vString, "\t") {
+ s.GIDs[i], err = strconv.ParseUint(v, 10, bits.UintSize)
+ if err != nil {
+ return err
+ }
+ }
+ case "NSpid":
+ s.NSpids = calcNSPidsList(vString)
case "VmPeak":
s.VmPeak = vUintBytes
case "VmSize":
@@ -161,10 +185,54 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt
s.VoluntaryCtxtSwitches = vUint
case "nonvoluntary_ctxt_switches":
s.NonVoluntaryCtxtSwitches = vUint
+ case "Cpus_allowed_list":
+ s.CpusAllowedList = calcCpusAllowedList(vString)
}
+
+ return nil
}
// TotalCtxtSwitches returns the total context switch.
func (s ProcStatus) TotalCtxtSwitches() uint64 {
return s.VoluntaryCtxtSwitches + s.NonVoluntaryCtxtSwitches
}
+
+func calcCpusAllowedList(cpuString string) []uint64 {
+ s := strings.Split(cpuString, ",")
+
+ var g []uint64
+
+ for _, cpu := range s {
+ // Parse CPU ranges, e.g. "1-3" expands to [1,2,3].
+ if l := strings.Split(strings.TrimSpace(cpu), "-"); len(l) > 1 {
+ startCPU, _ := strconv.ParseUint(l[0], 10, 64)
+ endCPU, _ := strconv.ParseUint(l[1], 10, 64)
+
+ for i := startCPU; i <= endCPU; i++ {
+ g = append(g, i)
+ }
+ } else if len(l) == 1 {
+ cpu, _ := strconv.ParseUint(l[0], 10, 64)
+ g = append(g, cpu)
+ }
+
+ }
+
+ sort.Slice(g, func(i, j int) bool { return g[i] < g[j] })
+ return g
+}
+
+func calcNSPidsList(nspidsString string) []uint64 {
+ s := strings.Split(nspidsString, " ")
+ var nspids []uint64
+
+ for _, nspid := range s {
+ nspid, _ := strconv.ParseUint(nspid, 10, 64)
+ if nspid == 0 {
+ continue
+ }
+ nspids = append(nspids, nspid)
+ }
+
+ return nspids
+}
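proc_status.go now parses UIDs/GIDs numerically, collects NSpid, and expands Cpus_allowed_list ranges (a value of "0-2,4" becomes [0 1 2 4]). A sketch of the caller-visible result, again assuming `procfs.Self`:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	status, err := p.NewStatus()
	if err != nil {
		log.Fatal(err)
	}
	// UIDs/GIDs are now [4]uint64, so no strconv is needed at the call site;
	// CpusAllowedList and NSpids are the newly parsed fields.
	fmt.Printf("real uid=%d cpus=%v nspids=%v\n",
		status.UIDs[0], status.CpusAllowedList, status.NSpids)
}
```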
diff --git a/vendor/github.com/prometheus/procfs/proc_sys.go b/vendor/github.com/prometheus/procfs/proc_sys.go
index d46533ebf41..5eefbe2ef8b 100644
--- a/vendor/github.com/prometheus/procfs/proc_sys.go
+++ b/vendor/github.com/prometheus/procfs/proc_sys.go
@@ -44,7 +44,7 @@ func (fs FS) SysctlInts(sysctl string) ([]int, error) {
vp := util.NewValueParser(f)
values[i] = vp.Int()
if err := vp.Err(); err != nil {
- return nil, fmt.Errorf("field %d in sysctl %s is not a valid int: %w", i, sysctl, err)
+ return nil, fmt.Errorf("%w: field %d in sysctl %s is not a valid int: %w", ErrFileParse, i, sysctl, err)
}
}
return values, nil
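The recurring pattern in these hunks, `fmt.Errorf("%w: ...: %w", ErrFileParse, ..., err)`, wraps both the package sentinel and the underlying cause; two `%w` verbs in a single Errorf require Go 1.20, matching the `go 1.20` bump recorded in modules.txt below. A sketch of matching on the sentinel from a caller:

```go
package main

import (
	"errors"
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	vals, err := fs.SysctlInts("kernel/pid_max")
	if errors.Is(err, procfs.ErrFileParse) {
		// Matched via the sentinel wrapped with %w above; the underlying
		// strconv error is still reachable through errors.Is/As.
		log.Fatalf("malformed sysctl file: %v", err)
	}
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("pid_max:", vals[0])
}
```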
diff --git a/vendor/github.com/prometheus/procfs/slab.go b/vendor/github.com/prometheus/procfs/slab.go
index bc9aaf5c288..8611c901770 100644
--- a/vendor/github.com/prometheus/procfs/slab.go
+++ b/vendor/github.com/prometheus/procfs/slab.go
@@ -68,7 +68,7 @@ func parseV21SlabEntry(line string) (*Slab, error) {
l := slabSpace.ReplaceAllString(line, " ")
s := strings.Split(l, " ")
if len(s) != 16 {
- return nil, fmt.Errorf("unable to parse: %q", line)
+ return nil, fmt.Errorf("%w: unable to parse: %q", ErrFileParse, line)
}
var err error
i := &Slab{Name: s[0]}
diff --git a/vendor/github.com/prometheus/procfs/softirqs.go b/vendor/github.com/prometheus/procfs/softirqs.go
index 559129cbca3..28708e07459 100644
--- a/vendor/github.com/prometheus/procfs/softirqs.go
+++ b/vendor/github.com/prometheus/procfs/softirqs.go
@@ -57,7 +57,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
)
if !scanner.Scan() {
- return Softirqs{}, fmt.Errorf("softirqs empty")
+ return Softirqs{}, fmt.Errorf("%w: softirqs empty", ErrFileRead)
}
for scanner.Scan() {
@@ -74,7 +74,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
softirqs.Hi = make([]uint64, len(perCPU))
for i, count := range perCPU {
if softirqs.Hi[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Softirqs{}, fmt.Errorf("couldn't parse %q (HI%d): %w", count, i, err)
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HI%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "TIMER:":
@@ -82,7 +82,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
softirqs.Timer = make([]uint64, len(perCPU))
for i, count := range perCPU {
if softirqs.Timer[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Softirqs{}, fmt.Errorf("couldn't parse %q (TIMER%d): %w", count, i, err)
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TIMER%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "NET_TX:":
@@ -90,7 +90,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
softirqs.NetTx = make([]uint64, len(perCPU))
for i, count := range perCPU {
if softirqs.NetTx[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Softirqs{}, fmt.Errorf("couldn't parse %q (NET_TX%d): %w", count, i, err)
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_TX%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "NET_RX:":
@@ -98,7 +98,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
softirqs.NetRx = make([]uint64, len(perCPU))
for i, count := range perCPU {
if softirqs.NetRx[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Softirqs{}, fmt.Errorf("couldn't parse %q (NET_RX%d): %w", count, i, err)
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_RX%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "BLOCK:":
@@ -106,7 +106,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
softirqs.Block = make([]uint64, len(perCPU))
for i, count := range perCPU {
if softirqs.Block[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Softirqs{}, fmt.Errorf("couldn't parse %q (BLOCK%d): %w", count, i, err)
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (BLOCK%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "IRQ_POLL:":
@@ -114,7 +114,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
softirqs.IRQPoll = make([]uint64, len(perCPU))
for i, count := range perCPU {
if softirqs.IRQPoll[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Softirqs{}, fmt.Errorf("couldn't parse %q (IRQ_POLL%d): %w", count, i, err)
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (IRQ_POLL%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "TASKLET:":
@@ -122,7 +122,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
softirqs.Tasklet = make([]uint64, len(perCPU))
for i, count := range perCPU {
if softirqs.Tasklet[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Softirqs{}, fmt.Errorf("couldn't parse %q (TASKLET%d): %w", count, i, err)
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TASKLET%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "SCHED:":
@@ -130,7 +130,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
softirqs.Sched = make([]uint64, len(perCPU))
for i, count := range perCPU {
if softirqs.Sched[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Softirqs{}, fmt.Errorf("couldn't parse %q (SCHED%d): %w", count, i, err)
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (SCHED%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "HRTIMER:":
@@ -138,7 +138,7 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
softirqs.HRTimer = make([]uint64, len(perCPU))
for i, count := range perCPU {
if softirqs.HRTimer[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Softirqs{}, fmt.Errorf("couldn't parse %q (HRTIMER%d): %w", count, i, err)
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HRTIMER%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "RCU:":
@@ -146,14 +146,14 @@ func parseSoftirqs(r io.Reader) (Softirqs, error) {
softirqs.RCU = make([]uint64, len(perCPU))
for i, count := range perCPU {
if softirqs.RCU[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Softirqs{}, fmt.Errorf("couldn't parse %q (RCU%d): %w", count, i, err)
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (RCU%d): %w", ErrFileParse, count, i, err)
}
}
}
}
if err := scanner.Err(); err != nil {
- return Softirqs{}, fmt.Errorf("couldn't parse softirqs: %w", err)
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse softirqs: %w", ErrFileParse, err)
}
return softirqs, scanner.Err()
diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go
index 33f97caa08d..e36b41c18a9 100644
--- a/vendor/github.com/prometheus/procfs/stat.go
+++ b/vendor/github.com/prometheus/procfs/stat.go
@@ -62,7 +62,7 @@ type Stat struct {
// Summed up cpu statistics.
CPUTotal CPUStat
// Per-CPU statistics.
- CPU []CPUStat
+ CPU map[int64]CPUStat
// Number of times interrupts were handled, which contains numbered and unnumbered IRQs.
IRQTotal uint64
// Number of times a numbered IRQ was triggered.
@@ -93,10 +93,10 @@ func parseCPUStat(line string) (CPUStat, int64, error) {
&cpuStat.Guest, &cpuStat.GuestNice)
if err != nil && err != io.EOF {
- return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu): %w", line, err)
+ return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu): %w", ErrFileParse, line, err)
}
if count == 0 {
- return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu): 0 elements parsed", line)
+ return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu): 0 elements parsed", ErrFileParse, line)
}
cpuStat.User /= userHZ
@@ -116,7 +116,7 @@ func parseCPUStat(line string) (CPUStat, int64, error) {
cpuID, err := strconv.ParseInt(cpu[3:], 10, 64)
if err != nil {
- return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu/cpuid): %w", line, err)
+ return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu/cpuid): %w", ErrFileParse, line, err)
}
return cpuStat, cpuID, nil
@@ -136,7 +136,7 @@ func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) {
&softIRQStat.Hrtimer, &softIRQStat.Rcu)
if err != nil {
- return SoftIRQStat{}, 0, fmt.Errorf("couldn't parse %q (softirq): %w", line, err)
+ return SoftIRQStat{}, 0, fmt.Errorf("%w: couldn't parse %q (softirq): %w", ErrFileParse, line, err)
}
return softIRQStat, total, nil
@@ -170,10 +170,27 @@ func (fs FS) Stat() (Stat, error) {
if err != nil {
return Stat{}, err
}
+ procStat, err := parseStat(bytes.NewReader(data), fileName)
+ if err != nil {
+ return Stat{}, err
+ }
+ return procStat, nil
+}
- stat := Stat{}
+// parseStat parses the metrics from /proc/[pid]/stat.
+func parseStat(r io.Reader, fileName string) (Stat, error) {
+ var (
+ scanner = bufio.NewScanner(r)
+ stat = Stat{
+ CPU: make(map[int64]CPUStat),
+ }
+ err error
+ )
+
+ // Increase default scanner buffer to handle very long `intr` lines.
+ buf := make([]byte, 0, 8*1024)
+ scanner.Buffer(buf, 1024*1024)
- scanner := bufio.NewScanner(bytes.NewReader(data))
for scanner.Scan() {
line := scanner.Text()
parts := strings.Fields(scanner.Text())
@@ -184,34 +201,34 @@ func (fs FS) Stat() (Stat, error) {
switch {
case parts[0] == "btime":
if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
- return Stat{}, fmt.Errorf("couldn't parse %q (btime): %w", parts[1], err)
+ return Stat{}, fmt.Errorf("%w: couldn't parse %q (btime): %w", ErrFileParse, parts[1], err)
}
case parts[0] == "intr":
if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
- return Stat{}, fmt.Errorf("couldn't parse %q (intr): %w", parts[1], err)
+ return Stat{}, fmt.Errorf("%w: couldn't parse %q (intr): %w", ErrFileParse, parts[1], err)
}
numberedIRQs := parts[2:]
stat.IRQ = make([]uint64, len(numberedIRQs))
for i, count := range numberedIRQs {
if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Stat{}, fmt.Errorf("couldn't parse %q (intr%d): %w", count, i, err)
+ return Stat{}, fmt.Errorf("%w: couldn't parse %q (intr%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "ctxt":
if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
- return Stat{}, fmt.Errorf("couldn't parse %q (ctxt): %w", parts[1], err)
+ return Stat{}, fmt.Errorf("%w: couldn't parse %q (ctxt): %w", ErrFileParse, parts[1], err)
}
case parts[0] == "processes":
if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
- return Stat{}, fmt.Errorf("couldn't parse %q (processes): %w", parts[1], err)
+ return Stat{}, fmt.Errorf("%w: couldn't parse %q (processes): %w", ErrFileParse, parts[1], err)
}
case parts[0] == "procs_running":
if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
- return Stat{}, fmt.Errorf("couldn't parse %q (procs_running): %w", parts[1], err)
+ return Stat{}, fmt.Errorf("%w: couldn't parse %q (procs_running): %w", ErrFileParse, parts[1], err)
}
case parts[0] == "procs_blocked":
if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
- return Stat{}, fmt.Errorf("couldn't parse %q (procs_blocked): %w", parts[1], err)
+ return Stat{}, fmt.Errorf("%w: couldn't parse %q (procs_blocked): %w", ErrFileParse, parts[1], err)
}
case parts[0] == "softirq":
softIRQStats, total, err := parseSoftIRQStat(line)
@@ -228,16 +245,13 @@ func (fs FS) Stat() (Stat, error) {
if cpuID == -1 {
stat.CPUTotal = cpuStat
} else {
- for int64(len(stat.CPU)) <= cpuID {
- stat.CPU = append(stat.CPU, CPUStat{})
- }
stat.CPU[cpuID] = cpuStat
}
}
}
if err := scanner.Err(); err != nil {
- return Stat{}, fmt.Errorf("couldn't parse %q: %w", fileName, err)
+ return Stat{}, fmt.Errorf("%w: couldn't parse %q: %w", ErrFileParse, fileName, err)
}
return stat, nil
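Stat.CPU changes from a slice to `map[int64]CPUStat`, so sparse CPU IDs (offline or hotplugged cores) no longer force placeholder entries; since map iteration order is random, callers wanting stable output must sort the keys. A sketch:

```go
package main

import (
	"fmt"
	"log"
	"sort"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	stat, err := fs.Stat()
	if err != nil {
		log.Fatal(err)
	}
	// Collect and sort the CPU IDs for deterministic output.
	ids := make([]int64, 0, len(stat.CPU))
	for id := range stat.CPU {
		ids = append(ids, id)
	}
	sort.Slice(ids, func(i, j int) bool { return ids[i] < ids[j] })
	for _, id := range ids {
		fmt.Printf("cpu%d user=%.2fs\n", id, stat.CPU[id].User)
	}
}
```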
diff --git a/vendor/github.com/prometheus/procfs/swaps.go b/vendor/github.com/prometheus/procfs/swaps.go
index 15edc2212b6..65fec834bf4 100644
--- a/vendor/github.com/prometheus/procfs/swaps.go
+++ b/vendor/github.com/prometheus/procfs/swaps.go
@@ -64,7 +64,7 @@ func parseSwapString(swapString string) (*Swap, error) {
swapFields := strings.Fields(swapString)
swapLength := len(swapFields)
if swapLength < 5 {
- return nil, fmt.Errorf("too few fields in swap string: %s", swapString)
+ return nil, fmt.Errorf("%w: too few fields in swap string: %s", ErrFileParse, swapString)
}
swap := &Swap{
@@ -74,15 +74,15 @@ func parseSwapString(swapString string) (*Swap, error) {
swap.Size, err = strconv.Atoi(swapFields[2])
if err != nil {
- return nil, fmt.Errorf("invalid swap size: %s", swapFields[2])
+ return nil, fmt.Errorf("%w: invalid swap size: %s: %w", ErrFileParse, swapFields[2], err)
}
swap.Used, err = strconv.Atoi(swapFields[3])
if err != nil {
- return nil, fmt.Errorf("invalid swap used: %s", swapFields[3])
+ return nil, fmt.Errorf("%w: invalid swap used: %s: %w", ErrFileParse, swapFields[3], err)
}
swap.Priority, err = strconv.Atoi(swapFields[4])
if err != nil {
- return nil, fmt.Errorf("invalid swap priority: %s", swapFields[4])
+ return nil, fmt.Errorf("%w: invalid swap priority: %s: %w", ErrFileParse, swapFields[4], err)
}
return swap, nil
diff --git a/vendor/github.com/prometheus/procfs/thread.go b/vendor/github.com/prometheus/procfs/thread.go
new file mode 100644
index 00000000000..80e0e947be7
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/thread.go
@@ -0,0 +1,80 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "fmt"
+ "os"
+ "strconv"
+
+ fsi "github.com/prometheus/procfs/internal/fs"
+)
+
+// Provide access to /proc/PID/task/TID files, for thread-specific values. Since
+// such files have the same structure as /proc/PID/ ones, the data structures
+// and the parsers for the latter may be reused.
+
+// AllThreads returns a list of all currently available threads under /proc/PID.
+func AllThreads(pid int) (Procs, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return Procs{}, err
+ }
+ return fs.AllThreads(pid)
+}
+
+// AllThreads returns a list of all currently available threads for PID.
+func (fs FS) AllThreads(pid int) (Procs, error) {
+ taskPath := fs.proc.Path(strconv.Itoa(pid), "task")
+ d, err := os.Open(taskPath)
+ if err != nil {
+ return Procs{}, err
+ }
+ defer d.Close()
+
+ names, err := d.Readdirnames(-1)
+ if err != nil {
+ return Procs{}, fmt.Errorf("%w: could not read %q: %w", ErrFileRead, d.Name(), err)
+ }
+
+ t := Procs{}
+ for _, n := range names {
+ tid, err := strconv.ParseInt(n, 10, 64)
+ if err != nil {
+ continue
+ }
+
+ t = append(t, Proc{PID: int(tid), fs: FS{fsi.FS(taskPath), fs.isReal}})
+ }
+
+ return t, nil
+}
+
+// Thread returns a process for a given PID, TID.
+func (fs FS) Thread(pid, tid int) (Proc, error) {
+ taskPath := fs.proc.Path(strconv.Itoa(pid), "task")
+ if _, err := os.Stat(taskPath); err != nil {
+ return Proc{}, err
+ }
+ return Proc{PID: tid, fs: FS{fsi.FS(taskPath), fs.isReal}}, nil
+}
+
+// Thread returns a process for a given TID of Proc.
+func (proc Proc) Thread(tid int) (Proc, error) {
+ tfs := FS{fsi.FS(proc.path("task")), proc.fs.isReal}
+ if _, err := os.Stat(tfs.proc.Path(strconv.Itoa(tid))); err != nil {
+ return Proc{}, err
+ }
+ return Proc{PID: tid, fs: tfs}, nil
+}
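A usage sketch for the new thread API: list a process's threads and read each one's stat. Threads can exit between listing and reading, so per-thread errors are skipped rather than treated as fatal:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/prometheus/procfs"
)

func main() {
	threads, err := procfs.AllThreads(os.Getpid())
	if err != nil {
		log.Fatal(err)
	}
	for _, t := range threads {
		stat, err := t.Stat()
		if err != nil {
			continue // thread may have exited between listing and reading
		}
		fmt.Printf("tid=%d comm=%s last-cpu=%d\n", t.PID, stat.Comm, stat.Processor)
	}
}
```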
diff --git a/vendor/github.com/prometheus/procfs/vm.go b/vendor/github.com/prometheus/procfs/vm.go
index 20ceb77e2df..51c49d89e81 100644
--- a/vendor/github.com/prometheus/procfs/vm.go
+++ b/vendor/github.com/prometheus/procfs/vm.go
@@ -26,7 +26,9 @@ import (
)
// The VM interface is described at
-// https://www.kernel.org/doc/Documentation/sysctl/vm.txt
+//
+// https://www.kernel.org/doc/Documentation/sysctl/vm.txt
+//
// Each setting is exposed as a single file.
// Each file contains one line with a single numerical value, except lowmem_reserve_ratio which holds an array
// and numa_zonelist_order (deprecated) which is a string.
@@ -84,7 +86,7 @@ func (fs FS) VM() (*VM, error) {
return nil, err
}
if !file.Mode().IsDir() {
- return nil, fmt.Errorf("%s is not a directory", path)
+ return nil, fmt.Errorf("%w: %s is not a directory", ErrFileRead, path)
}
files, err := os.ReadDir(path)
diff --git a/vendor/github.com/prometheus/procfs/zoneinfo.go b/vendor/github.com/prometheus/procfs/zoneinfo.go
index c745a4c04ff..e54d94b0903 100644
--- a/vendor/github.com/prometheus/procfs/zoneinfo.go
+++ b/vendor/github.com/prometheus/procfs/zoneinfo.go
@@ -75,11 +75,11 @@ var nodeZoneRE = regexp.MustCompile(`(\d+), zone\s+(\w+)`)
func (fs FS) Zoneinfo() ([]Zoneinfo, error) {
data, err := os.ReadFile(fs.proc.Path("zoneinfo"))
if err != nil {
- return nil, fmt.Errorf("error reading zoneinfo %q: %w", fs.proc.Path("zoneinfo"), err)
+ return nil, fmt.Errorf("%w: error reading zoneinfo %q: %w", ErrFileRead, fs.proc.Path("zoneinfo"), err)
}
zoneinfo, err := parseZoneinfo(data)
if err != nil {
- return nil, fmt.Errorf("error parsing zoneinfo %q: %w", fs.proc.Path("zoneinfo"), err)
+ return nil, fmt.Errorf("%w: error parsing zoneinfo %q: %w", ErrFileParse, fs.proc.Path("zoneinfo"), err)
}
return zoneinfo, nil
}
diff --git a/vendor/google.golang.org/protobuf/encoding/protodelim/protodelim.go b/vendor/google.golang.org/protobuf/encoding/protodelim/protodelim.go
new file mode 100644
index 00000000000..2ef36bbcf92
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/encoding/protodelim/protodelim.go
@@ -0,0 +1,160 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package protodelim marshals and unmarshals varint size-delimited messages.
+package protodelim
+
+import (
+ "bufio"
+ "encoding/binary"
+ "fmt"
+ "io"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/proto"
+)
+
+// MarshalOptions is a configurable varint size-delimited marshaler.
+type MarshalOptions struct{ proto.MarshalOptions }
+
+// MarshalTo writes a varint size-delimited wire-format message to w.
+// If w returns an error, MarshalTo returns it unchanged.
+func (o MarshalOptions) MarshalTo(w io.Writer, m proto.Message) (int, error) {
+ msgBytes, err := o.MarshalOptions.Marshal(m)
+ if err != nil {
+ return 0, err
+ }
+
+ sizeBytes := protowire.AppendVarint(nil, uint64(len(msgBytes)))
+ sizeWritten, err := w.Write(sizeBytes)
+ if err != nil {
+ return sizeWritten, err
+ }
+ msgWritten, err := w.Write(msgBytes)
+ if err != nil {
+ return sizeWritten + msgWritten, err
+ }
+ return sizeWritten + msgWritten, nil
+}
+
+// MarshalTo writes a varint size-delimited wire-format message to w
+// with the default options.
+//
+// See the documentation for [MarshalOptions.MarshalTo].
+func MarshalTo(w io.Writer, m proto.Message) (int, error) {
+ return MarshalOptions{}.MarshalTo(w, m)
+}
+
+// UnmarshalOptions is a configurable varint size-delimited unmarshaler.
+type UnmarshalOptions struct {
+ proto.UnmarshalOptions
+
+ // MaxSize is the maximum size in wire-format bytes of a single message.
+ // Unmarshaling a message larger than MaxSize will return an error.
+ // A zero MaxSize will default to 4 MiB.
+ // Setting MaxSize to -1 disables the limit.
+ MaxSize int64
+}
+
+const defaultMaxSize = 4 << 20 // 4 MiB, corresponds to the default gRPC max request/response size
+
+// SizeTooLargeError is an error that is returned when the unmarshaler encounters a message size
+// that is larger than its configured [UnmarshalOptions.MaxSize].
+type SizeTooLargeError struct {
+ // Size is the varint size of the message encountered
+ // that was larger than the provided MaxSize.
+ Size uint64
+
+ // MaxSize is the MaxSize limit configured in UnmarshalOptions, which Size exceeded.
+ MaxSize uint64
+}
+
+func (e *SizeTooLargeError) Error() string {
+ return fmt.Sprintf("message size %d exceeded unmarshaler's maximum configured size %d", e.Size, e.MaxSize)
+}
+
+// Reader is the interface expected by [UnmarshalFrom].
+// It is implemented by *[bufio.Reader].
+type Reader interface {
+ io.Reader
+ io.ByteReader
+}
+
+// UnmarshalFrom parses and consumes a varint size-delimited wire-format message
+// from r.
+// The provided message must be mutable (e.g., a non-nil pointer to a message).
+//
+// The error is [io.EOF] only if no bytes are read.
+// If an EOF happens after reading some but not all the bytes,
+// UnmarshalFrom returns a non-io.EOF error.
+// In particular if r returns a non-io.EOF error, UnmarshalFrom returns it unchanged,
+// and if only a size is read with no subsequent message, [io.ErrUnexpectedEOF] is returned.
+func (o UnmarshalOptions) UnmarshalFrom(r Reader, m proto.Message) error {
+ var sizeArr [binary.MaxVarintLen64]byte
+ sizeBuf := sizeArr[:0]
+ for i := range sizeArr {
+ b, err := r.ReadByte()
+ if err != nil {
+ // Immediate EOF is unexpected.
+ if err == io.EOF && i != 0 {
+ break
+ }
+ return err
+ }
+ sizeBuf = append(sizeBuf, b)
+ if b < 0x80 {
+ break
+ }
+ }
+ size, n := protowire.ConsumeVarint(sizeBuf)
+ if n < 0 {
+ return protowire.ParseError(n)
+ }
+
+ maxSize := o.MaxSize
+ if maxSize == 0 {
+ maxSize = defaultMaxSize
+ }
+ if maxSize != -1 && size > uint64(maxSize) {
+ return errors.Wrap(&SizeTooLargeError{Size: size, MaxSize: uint64(maxSize)}, "")
+ }
+
+ var b []byte
+ var err error
+ if br, ok := r.(*bufio.Reader); ok {
+ // Use the []byte from the bufio.Reader instead of having to allocate one.
+ // This reduces CPU usage and allocated bytes.
+ b, err = br.Peek(int(size))
+ if err == nil {
+ defer br.Discard(int(size))
+ } else {
+ b = nil
+ }
+ }
+ if b == nil {
+ b = make([]byte, size)
+ _, err = io.ReadFull(r, b)
+ }
+
+ if err == io.EOF {
+ return io.ErrUnexpectedEOF
+ }
+ if err != nil {
+ return err
+ }
+ if err := o.Unmarshal(b, m); err != nil {
+ return err
+ }
+ return nil
+}
+
+// UnmarshalFrom parses and consumes a varint size-delimited wire-format message
+// from r with the default options.
+// The provided message must be mutable (e.g., a non-nil pointer to a message).
+//
+// See the documentation for [UnmarshalOptions.UnmarshalFrom].
+func UnmarshalFrom(r Reader, m proto.Message) error {
+ return UnmarshalOptions{}.UnmarshalFrom(r, m)
+}
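A minimal round-trip sketch for protodelim: write several varint size-delimited messages back to back, then read them until a clean io.EOF. `durationpb` is used here only as a readily available generated message type; a `*bufio.Reader` satisfies the Reader interface and enables the Peek fast path above.

```go
package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"log"
	"time"

	"google.golang.org/protobuf/encoding/protodelim"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	var buf bytes.Buffer
	// Write three size-delimited messages to the same stream.
	for i := 1; i <= 3; i++ {
		msg := durationpb.New(time.Duration(i) * time.Second)
		if _, err := protodelim.MarshalTo(&buf, msg); err != nil {
			log.Fatal(err)
		}
	}
	// Read them back; UnmarshalFrom returns io.EOF only on a clean
	// end-of-stream, so the loop terminates exactly after message three.
	r := bufio.NewReader(&buf)
	for {
		d := &durationpb.Duration{}
		err := protodelim.UnmarshalFrom(r, d)
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(d.AsDuration())
	}
}
```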
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 2d370c06e85..e319774983e 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -64,7 +64,7 @@ github.com/ale-linux/aries-framework-go/component/kmscrypto/crypto/primitive/bbs
## explicit
github.com/alecthomas/template
github.com/alecthomas/template/parse
-# github.com/alecthomas/units v0.0.0-20210912230133-d1bdfacee922
+# github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137
## explicit; go 1.15
github.com/alecthomas/units
# github.com/beorn7/perks v1.0.1
@@ -282,8 +282,8 @@ github.com/inconshreveable/mousetrap
# github.com/kilic/bls12-381 v0.1.0
## explicit; go 1.13
github.com/kilic/bls12-381
-# github.com/klauspost/compress v1.17.6
-## explicit; go 1.19
+# github.com/klauspost/compress v1.17.9
+## explicit; go 1.20
github.com/klauspost/compress
github.com/klauspost/compress/fse
github.com/klauspost/compress/huff0
@@ -303,9 +303,6 @@ github.com/magiconair/properties
# github.com/mattn/go-runewidth v0.0.4
## explicit
github.com/mattn/go-runewidth
-# github.com/matttproud/golang_protobuf_extensions v1.0.4
-## explicit; go 1.9
-github.com/matttproud/golang_protobuf_extensions/pbutil
# github.com/miekg/pkcs11 v1.1.1
## explicit; go 1.12
github.com/miekg/pkcs11
@@ -355,6 +352,9 @@ github.com/moby/term/windows
# github.com/morikuni/aec v1.0.0
## explicit
github.com/morikuni/aec
+# github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
+## explicit
+github.com/munnerz/goautoneg
# github.com/onsi/ginkgo/v2 v2.13.2
## explicit; go 1.18
github.com/onsi/ginkgo/v2
@@ -413,21 +413,22 @@ github.com/pkg/errors
# github.com/pmezard/go-difflib v1.0.0
## explicit
github.com/pmezard/go-difflib/difflib
-# github.com/prometheus/client_golang v1.11.1
-## explicit; go 1.13
+# github.com/prometheus/client_golang v1.20.5
+## explicit; go 1.20
+github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil
+github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header
github.com/prometheus/client_golang/prometheus
github.com/prometheus/client_golang/prometheus/internal
github.com/prometheus/client_golang/prometheus/promhttp
-# github.com/prometheus/client_model v0.3.0
-## explicit; go 1.9
+# github.com/prometheus/client_model v0.6.1
+## explicit; go 1.19
github.com/prometheus/client_model/go
-# github.com/prometheus/common v0.32.1
-## explicit; go 1.13
+# github.com/prometheus/common v0.55.0
+## explicit; go 1.20
github.com/prometheus/common/expfmt
-github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg
github.com/prometheus/common/model
-# github.com/prometheus/procfs v0.8.0
-## explicit; go 1.17
+# github.com/prometheus/procfs v0.15.1
+## explicit; go 1.20
github.com/prometheus/procfs
github.com/prometheus/procfs/internal/fs
github.com/prometheus/procfs/internal/util
@@ -660,6 +661,7 @@ google.golang.org/grpc/status
google.golang.org/grpc/tap
# google.golang.org/protobuf v1.34.2
## explicit; go 1.20
+google.golang.org/protobuf/encoding/protodelim
google.golang.org/protobuf/encoding/protojson
google.golang.org/protobuf/encoding/prototext
google.golang.org/protobuf/encoding/protowire