diff --git a/go.mod b/go.mod index 01fb21414bb..87f61446565 100644 --- a/go.mod +++ b/go.mod @@ -36,7 +36,7 @@ require ( github.com/prometheus/alertmanager v0.27.0 github.com/prometheus/client_golang v1.20.5 github.com/prometheus/client_model v0.6.1 - github.com/prometheus/common v0.59.1 + github.com/prometheus/common v0.60.0 github.com/prometheus/prometheus v1.99.0 github.com/segmentio/fasthash v1.0.3 github.com/sirupsen/logrus v1.9.3 @@ -82,19 +82,19 @@ require ( github.com/twmb/franz-go/plugin/kotel v1.5.0 github.com/twmb/franz-go/plugin/kprom v1.1.0 github.com/xlab/treeprint v1.2.0 - go.opentelemetry.io/collector/pdata v1.15.0 - go.opentelemetry.io/otel v1.30.0 - go.opentelemetry.io/otel/trace v1.30.0 + go.opentelemetry.io/collector/pdata v1.16.0 + go.opentelemetry.io/otel v1.31.0 + go.opentelemetry.io/otel/trace v1.31.0 go.uber.org/multierr v1.11.0 golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c golang.org/x/term v0.25.0 - google.golang.org/api v0.196.0 + google.golang.org/api v0.199.0 google.golang.org/protobuf v1.35.1 sigs.k8s.io/kustomize/kyaml v0.16.0 ) require ( - cloud.google.com/go/auth v0.9.3 // indirect + cloud.google.com/go/auth v0.9.5 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 // indirect @@ -112,7 +112,7 @@ require ( github.com/go-test/deep v1.1.0 // indirect github.com/goccy/go-json v0.10.3 // indirect github.com/golang-jwt/jwt/v5 v5.2.1 // indirect - github.com/golang/glog v1.2.1 // indirect + github.com/golang/glog v1.2.2 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/grafana/pyroscope-go/godeltaprof v0.1.8 // indirect github.com/hashicorp/go-retryablehttp v0.7.7 // indirect @@ -122,6 +122,8 @@ require ( github.com/huandu/xstrings v1.3.2 // indirect github.com/imdario/mergo v0.3.16 // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect + github.com/mdlayher/socket v0.4.1 // indirect + github.com/mdlayher/vsock v1.2.1 // indirect github.com/mitchellh/copystructure v1.0.0 // indirect github.com/mitchellh/reflectwalk v1.0.0 // indirect github.com/pierrec/lz4/v4 v4.1.21 // indirect @@ -134,20 +136,19 @@ require ( github.com/tklauser/numcpus v0.6.1 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.55.0 // indirect - go.opentelemetry.io/otel/sdk v1.30.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.56.0 // indirect go.opentelemetry.io/otel/sdk/metric v1.30.0 // indirect gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect gopkg.in/mail.v2 v2.3.1 // indirect gopkg.in/telebot.v3 v3.2.1 // indirect - k8s.io/apimachinery v0.29.3 // indirect - k8s.io/client-go v0.29.3 // indirect + k8s.io/apimachinery v0.31.1 // indirect + k8s.io/client-go v0.31.1 // indirect k8s.io/klog/v2 v2.130.1 // indirect ) require ( cloud.google.com/go v0.115.1 // indirect - cloud.google.com/go/compute/metadata v0.5.0 // indirect + cloud.google.com/go/compute/metadata v0.5.2 // indirect cloud.google.com/go/iam v1.2.0 // indirect github.com/DmitriyVTitov/size v1.5.0 // indirect github.com/HdrHistogram/hdrhistogram-go v1.1.2 // indirect @@ -233,7 +234,7 @@ require ( github.com/mattn/go-colorable v0.1.13 // indirect 
github.com/mattn/go-isatty v0.0.20 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 - github.com/miekg/dns v1.1.61 // indirect + github.com/miekg/dns v1.1.62 // indirect github.com/minio/md5-simd v1.1.2 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect @@ -246,7 +247,7 @@ require ( github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/common/sigv4 v0.1.0 // indirect - github.com/prometheus/exporter-toolkit v0.11.0 // indirect + github.com/prometheus/exporter-toolkit v0.13.0 // indirect github.com/rainycape/unidecode v0.0.0-20150907023854-cb7f23ec59be // indirect github.com/rs/cors v1.11.0 // indirect github.com/rs/xid v1.6.0 // indirect @@ -264,25 +265,25 @@ require ( go.etcd.io/etcd/client/v3 v3.5.4 // indirect go.mongodb.org/mongo-driver v1.14.0 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/collector/semconv v0.105.0 - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 // indirect - go.opentelemetry.io/otel/metric v1.30.0 // indirect + go.opentelemetry.io/collector/semconv v0.110.0 + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 // indirect + go.opentelemetry.io/otel/metric v1.31.0 // indirect go.uber.org/zap v1.21.0 // indirect golang.org/x/mod v0.21.0 // indirect - golang.org/x/oauth2 v0.22.0 // indirect + golang.org/x/oauth2 v0.23.0 // indirect golang.org/x/sys v0.26.0 // indirect golang.org/x/text v0.19.0 // indirect golang.org/x/tools v0.26.0 // indirect google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect - k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect + k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 // indirect sigs.k8s.io/yaml v1.4.0 // indirect ) // Using a fork of Prometheus with Mimir-specific changes. 
-replace github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v0.0.0-20241104175756-dea6247a158f +replace github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v0.0.0-20241107161256-ab952f991472 // Replace memberlist with our fork which includes some fixes that haven't been // merged upstream yet: diff --git a/go.sum b/go.sum index 6eb0c2129c5..6b665f2cf20 100644 --- a/go.sum +++ b/go.sum @@ -117,8 +117,8 @@ cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEar cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= cloud.google.com/go/assuredworkloads v1.11.1/go.mod h1:+F04I52Pgn5nmPG36CWFtxmav6+7Q+c5QyJoL18Lry0= -cloud.google.com/go/auth v0.9.3 h1:VOEUIAADkkLtyfr3BLa3R8Ed/j6w1jTBmARx+wb5w5U= -cloud.google.com/go/auth v0.9.3/go.mod h1:7z6VY+7h3KUdRov5F1i8NDP5ZzWKYmEPO842BgCsmTk= +cloud.google.com/go/auth v0.9.5 h1:4CTn43Eynw40aFVr3GpPqsQponx2jv0BQpjvajsbbzw= +cloud.google.com/go/auth v0.9.5/go.mod h1:Xo0n7n66eHyOWWCnitop6870Ilwo3PiZyodVkkH1xWM= cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY= cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc= cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= @@ -218,8 +218,8 @@ cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1h cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= -cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY= -cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= +cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo= +cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= @@ -926,8 +926,9 @@ github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b h1:ga8SEFjZ60pxLcmhnThWgvH2wg8376yUJmPhEH4H3kw= github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20 h1:N+3sFI5GUjRKBi+i0TxYVST9h4Ie192jJWpHvthBBgg= +github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/colega/go-yaml-yaml v0.0.0-20220720105220-255a8d16d094 h1:FpZSn61BWXbtyH68+uSv416veEswX1M2HRyQfdHnOyQ= 
github.com/colega/go-yaml-yaml v0.0.0-20220720105220-255a8d16d094/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= @@ -949,14 +950,14 @@ github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczC github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= -github.com/digitalocean/godo v1.121.0 h1:ilXiHuEnhbJs2fmFEPX0r/QQ6KfiOIMAhJN3f8NiCfI= -github.com/digitalocean/godo v1.121.0/go.mod h1:WQVH83OHUy6gC4gXpEVQKtxTd4L5oCp+5OialidkPLY= +github.com/digitalocean/godo v1.126.0 h1:+Znh7VMQj/E8ArbjWnc7OKGjWfzC+I8OCSRp7r1MdD8= +github.com/digitalocean/godo v1.126.0/go.mod h1:PU8JB6I1XYkQIdHFop8lLAY9ojp6M0XcU0TWaQSxbrc= github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0= github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI= github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= -github.com/docker/docker v27.1.1+incompatible h1:hO/M4MtV36kzKldqnA37IWhebRA+LnqqcqDja6kVaKY= -github.com/docker/docker v27.1.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v27.3.1+incompatible h1:KttF0XoteNTicmUtBO0L2tP+J7FGRFTjaEF4k6WdhfI= +github.com/docker/docker v27.3.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11 h1:IPrmumsT9t5BS7XcPhgsCTlkWbYg80SEXUzDpReaU6Y= github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11/go.mod h1:a6bNUGTbQBsY6VRHTr4h/rkOXjl244DyRD0tx3fgq4Q= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -979,8 +980,8 @@ github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go. 
github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= github.com/envoyproxy/go-control-plane v0.11.0/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI= github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0= -github.com/envoyproxy/go-control-plane v0.12.1-0.20240621013728-1eb8caab5155 h1:IgJPqnrlY2Mr4pYB6oaMKvFvwJ9H+X6CCY5x1vCTcpc= -github.com/envoyproxy/go-control-plane v0.12.1-0.20240621013728-1eb8caab5155/go.mod h1:5Wkq+JduFtdAXihLmeTJf+tRYIT4KBc2vPXDhwVo1pA= +github.com/envoyproxy/go-control-plane v0.13.0 h1:HzkeUz1Knt+3bK+8LG1bxOO/jzWZmdxpwC51i202les= +github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= @@ -1012,6 +1013,8 @@ github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nos github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/fullstorydev/emulators/storage v0.0.0-20240401123056-edc69752f474 h1:TufioMBjkJ6/Oqmlye/ReuxHFS35HyLmypj/BNy/8GY= github.com/fullstorydev/emulators/storage v0.0.0-20240401123056-edc69752f474/go.mod h1:PQwxF4UU8wuL+srGxr3BOhIW5zXqgucwVlO/nPZLsxw= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= @@ -1079,8 +1082,8 @@ github.com/go-resty/resty/v2 v2.13.1/go.mod h1:GznXlLxkq6Nh4sU59rPmUw3VtgpO3aS96 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg= github.com/go-test/deep v1.1.0/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= -github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg= -github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= +github.com/go-zookeeper/zk v1.0.4 h1:DPzxraQx7OrPyXq2phlGlNSIyWEsAox0RJmjTseMV6I= +github.com/go-zookeeper/zk v1.0.4/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= github.com/gobwas/httphead v0.1.0 h1:exrUm0f4YX0L7EBwZHuCF4GDp8aJfVeBrlLQrs6NqWU= github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM= github.com/gobwas/pool v0.2.1 h1:xfeeEhW7pwmX8nuLVlqbzVc7udMDrwetjEv+TZIz1og= @@ -1111,8 +1114,9 @@ github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGw github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= -github.com/golang/glog v1.2.1 h1:OptwRhECazUx5ix5TTWC3EZhsZEHWcYWY4FQHTIubm4= github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/glog v1.2.2 h1:1+mZ9upx1Dh6FmUTFR1naJ77miKiXgALjWOZ3NVFPmY= 
+github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -1246,8 +1250,8 @@ github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDP github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A= github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= -github.com/gophercloud/gophercloud v1.14.0 h1:Bt9zQDhPrbd4qX7EILGmy+i7GP35cc+AAL2+wIJpUE8= -github.com/gophercloud/gophercloud v1.14.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= +github.com/gophercloud/gophercloud v1.14.1 h1:DTCNaTVGl8/cFu58O1JwWgis9gtISAFONqpMKNg/Vpw= +github.com/gophercloud/gophercloud v1.14.1/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= @@ -1270,8 +1274,8 @@ github.com/grafana/gomemcache v0.0.0-20241016125027-0a5bcc5aef40 h1:1TeKhyS+pvzO github.com/grafana/gomemcache v0.0.0-20241016125027-0a5bcc5aef40/go.mod h1:IGRj8oOoxwJbHBYl1+OhS9UjQR0dv6SQOep7HqmtyFU= github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe h1:yIXAAbLswn7VNWBIvM71O2QsgfgW9fRXZNR0DXe6pDU= github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= -github.com/grafana/mimir-prometheus v0.0.0-20241104175756-dea6247a158f h1:/0K7ExuyP5pS/AUqibuJi6ATxEaZG6/2wcAbWMvw8D4= -github.com/grafana/mimir-prometheus v0.0.0-20241104175756-dea6247a158f/go.mod h1:7SuFBLahBoRY7KcgzWzK0p1n5QL+5dyr/Ysat6v1378= +github.com/grafana/mimir-prometheus v0.0.0-20241107161256-ab952f991472 h1:KH5DBqBOpfm3y6wNPLBhH0jvaEFsUPGdhhXCmyJ15wE= +github.com/grafana/mimir-prometheus v0.0.0-20241107161256-ab952f991472/go.mod h1:5pZyo8JoQezsp5hvVLlWhXmWLFcjUCC0fFvmswy2cBA= github.com/grafana/opentracing-contrib-go-stdlib v0.0.0-20230509071955-f410e79da956 h1:em1oddjXL8c1tL0iFdtVtPloq2hRPen2MJQKoAWpxu0= github.com/grafana/opentracing-contrib-go-stdlib v0.0.0-20230509071955-f410e79da956/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU= github.com/grafana/prometheus-alertmanager v0.25.1-0.20240930132144-b5e64e81e8d3 h1:6D2gGAwyQBElSrp3E+9lSr7k8gLuP3Aiy20rweLWeBw= @@ -1439,8 +1443,8 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs= github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= -github.com/linode/linodego v1.38.0 h1:wP3oW9OhGc6vhze8NPf2knbwH4TzSbrjzuCd9okjbTY= -github.com/linode/linodego v1.38.0/go.mod h1:L7GXKFD3PoN2xSEtFc04wIXP5WK65O10jYQx0PQISWQ= +github.com/linode/linodego v1.41.0 h1:GcP7JIBr9iLRJ9FwAtb9/WCT1DuPJS/xUApapfdjtiY= +github.com/linode/linodego 
v1.41.0/go.mod h1:Ow4/XZ0yvWBzt3iAHwchvhSx30AyLintsSMvvQ2/SJY= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= @@ -1473,10 +1477,14 @@ github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/mdlayher/socket v0.4.1 h1:eM9y2/jlbs1M615oshPQOHZzj6R6wMT7bX5NPiQvn2U= +github.com/mdlayher/socket v0.4.1/go.mod h1:cAqeGjoufqdxWkD7DkpyS+wcefOtmu5OQ8KuoJGIReA= +github.com/mdlayher/vsock v1.2.1 h1:pC1mTJTvjo1r9n9fbm7S1j04rCgCzhCOS5DY0zqHlnQ= +github.com/mdlayher/vsock v1.2.1/go.mod h1:NRfCibel++DgeMD8z/hP+PPTjlNJsdPOmxcnENvE+SE= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= -github.com/miekg/dns v1.1.61 h1:nLxbwF3XxhwVSm8g9Dghm9MHPaUZuqhPiGL+675ZmEs= -github.com/miekg/dns v1.1.61/go.mod h1:mnAarhS3nWaW+NVP2wTkYVIZyHNJ098SJZUki3eykwQ= +github.com/miekg/dns v1.1.62 h1:cN8OuEF1/x5Rq6Np+h1epln8OiyPWV+lROx9LxcGgIQ= +github.com/miekg/dns v1.1.62/go.mod h1:mvDlcItzm+br7MToIKqkglaGhlFMHJ9DTNNWONWXbNQ= github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= @@ -1532,8 +1540,8 @@ github.com/okzk/sdnotify v0.0.0-20240725214427-1c1fdd37c5ac h1:0h5zys3uIyKGGt6Lo github.com/okzk/sdnotify v0.0.0-20240725214427-1c1fdd37c5ac/go.mod h1:4soZNh0zW0LtYGdQ416i0jO0EIqMGcbtaspRS4BDvRQ= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg= -github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= +github.com/onsi/gomega v1.24.0 h1:+0glovB9Jd6z3VR+ScSwQqXVTIfJcGA9UBM8yzQxhqg= +github.com/onsi/gomega v1.24.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= @@ -1598,12 +1606,12 @@ github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8b github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.59.1 h1:LXb1quJHWm1P6wq/U824uxYi4Sg0oGvNeUm1z5dJoX0= -github.com/prometheus/common v0.59.1/go.mod h1:GpWM7dewqmVYcd7SmRaiWVe9SSqjf0UrwnYnpEZNuT0= 
+github.com/prometheus/common v0.60.0 h1:+V9PAREWNvJMAuJ1x1BaWl9dewMW4YrHZQbx0sJNllA= +github.com/prometheus/common v0.60.0/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= -github.com/prometheus/exporter-toolkit v0.11.0 h1:yNTsuZ0aNCNFQ3aFTD2uhPOvr4iD7fdBvKPAEGkNf+g= -github.com/prometheus/exporter-toolkit v0.11.0/go.mod h1:BVnENhnNecpwoTLiABx7mrPB/OLRIgN74qlQbV+FK1Q= +github.com/prometheus/exporter-toolkit v0.13.0 h1:lmA0Q+8IaXgmFRKw09RldZmZdnvu9wwcDLIXGmTPw1c= +github.com/prometheus/exporter-toolkit v0.13.0/go.mod h1:2uop99EZl80KdXhv/MxVI2181fMcwlsumFOqBecGkG0= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= @@ -1633,8 +1641,8 @@ github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFo github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/sagikazarmark/crypt v0.6.0/go.mod h1:U8+INwJo3nBv1m6A/8OBXAq7Jnpspk5AxSgDyEQcea8= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.29 h1:BkTk4gynLjguayxrYxZoMZjBnAOh7ntQvUkOFmkMqPU= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.29/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30 h1:yoKAVkEVwAqbGbR8n87rHQ1dulL25rKloGadb3vm770= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30/go.mod h1:sH0u6fq6x4R5M7WxkoQFY/o7UaiItec0o1LinLCJNq8= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/segmentio/fasthash v1.0.3 h1:EI9+KE1EwvMLBWwjpRDc+fEM+prwxDYbslddQGtrmhM= @@ -1722,6 +1730,8 @@ github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVK github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/vultr/govultr/v2 v2.17.2 h1:gej/rwr91Puc/tgh+j33p/BLR16UrIPnSr+AIwYWZQs= github.com/vultr/govultr/v2 v2.17.2/go.mod h1:ZFOKGWmgjytfyjeyAdhQlSWwTjh2ig+X49cAp50dzXI= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc= github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= @@ -1757,26 +1767,26 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/collector/pdata v1.15.0 h1:q/T1sFpRKJnjDrUsHdJ6mq4uSqViR/f92yvGwDby/gY= -go.opentelemetry.io/collector/pdata v1.15.0/go.mod h1:2wcsTIiLAJSbqBq/XUUYbi+cP+N87d0jEJzmb9nT19U= -go.opentelemetry.io/collector/semconv v0.105.0 
h1:8p6dZ3JfxFTjbY38d8xlQGB1TQ3nPUvs+D0RERniZ1g= -go.opentelemetry.io/collector/semconv v0.105.0/go.mod h1:yMVUCNoQPZVq/IPfrHrnntZTWsLf5YGZ7qwKulIl5hw= +go.opentelemetry.io/collector/pdata v1.16.0 h1:g02K8jlRnmQ7TQDuXpdgVL6vIxIVqr5Gbb1qIR27rto= +go.opentelemetry.io/collector/pdata v1.16.0/go.mod h1:YZZJIt2ehxosYf/Y1pbvexjNWsIGNNrzzlCTO9jC1F4= +go.opentelemetry.io/collector/semconv v0.110.0 h1:KHQnOHe3gUz0zsxe8ph9kN5OTypCFD4V+06AiBTfeNk= +go.opentelemetry.io/collector/semconv v0.110.0/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI= -go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.55.0 h1:sqmsIQ75l6lfZjjpnXXT9DFVtYEDg6CH0/Cn4/3A1Wg= -go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.55.0/go.mod h1:rsg1EO8LXSs2po50PB5CeY/MSVlhghuKBgXlKnqm6ks= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 h1:ZIg3ZT/aQ7AfKqdwp7ECpOK6vHqquXXuyTjIO8ZdmPs= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0/go.mod h1:DQAwmETtZV00skUwgD6+0U89g80NKsJE3DCKeLLPQMI= -go.opentelemetry.io/otel v1.30.0 h1:F2t8sK4qf1fAmY9ua4ohFS/K+FUuOPemHUIXHtktrts= -go.opentelemetry.io/otel v1.30.0/go.mod h1:tFw4Br9b7fOS+uEao81PJjVMjW/5fvNCbpsDIXqP0pc= -go.opentelemetry.io/otel/metric v1.30.0 h1:4xNulvn9gjzo4hjg+wzIKG7iNFEaBMX00Qd4QIZs7+w= -go.opentelemetry.io/otel/metric v1.30.0/go.mod h1:aXTfST94tswhWEb+5QjlSqG+cZlmyXy/u8jFpor3WqQ= +go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.56.0 h1:4BZHA+B1wXEQoGNHxW8mURaLhcdGwvRnmhGbm+odRbc= +go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.56.0/go.mod h1:3qi2EEwMgB4xnKgPLqsDP3j9qxnHDZeHsnAxfjQqTko= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 h1:UP6IpuHFkUgOQL9FFQFrZ+5LiwhhYRbi7VZSIx6Nj5s= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0/go.mod h1:qxuZLtbq5QDtdeSHsS7bcf6EH6uO6jUAgk764zd3rhM= +go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY= +go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE= +go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE= +go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY= go.opentelemetry.io/otel/sdk v1.30.0 h1:cHdik6irO49R5IysVhdn8oaiR9m8XluDaJAs4DfOrYE= go.opentelemetry.io/otel/sdk v1.30.0/go.mod h1:p14X4Ok8S+sygzblytT1nqG98QG2KYKv++HE0LY/mhg= go.opentelemetry.io/otel/sdk/metric v1.30.0 h1:QJLT8Pe11jyHBHfSAgYH7kEmT24eX792jZO1bo4BXkM= go.opentelemetry.io/otel/sdk/metric v1.30.0/go.mod h1:waS6P3YqFNzeP01kuo/MBBYqaoBJl7efRQHOaydhy1Y= -go.opentelemetry.io/otel/trace v1.30.0 h1:7UBkkYzeg3C7kQX8VAidWh2biiQbtAKjyIML8dQ9wmc= -go.opentelemetry.io/otel/trace v1.30.0/go.mod h1:5EyKqTzzmyqB9bwtCCq6pDLktPK6fmGf/Dph+8VI02o= +go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys= +go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.opentelemetry.io/proto/otlp v0.19.0/go.mod 
h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= @@ -1987,8 +1997,8 @@ golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= -golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA= -golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= +golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -2318,8 +2328,8 @@ google.golang.org/api v0.122.0/go.mod h1:gcitW0lvnyWjSp9nKxAbdHKIZ6vF4aajGueeslZ google.golang.org/api v0.124.0/go.mod h1:xu2HQurE5gi/3t1aFCvhPD781p0a3p11sdunTJ2BlP4= google.golang.org/api v0.125.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= google.golang.org/api v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= -google.golang.org/api v0.196.0 h1:k/RafYqebaIJBO3+SMnfEGtFVlvp5vSgqTUF54UN/zg= -google.golang.org/api v0.196.0/go.mod h1:g9IL21uGkYgvQ5BZg6BAtoGJQIm8r6EgaAbpNey5wBE= +google.golang.org/api v0.199.0 h1:aWUXClp+VFJmqE0JPvpZOK3LDQMyFKYIow4etYd9qxs= +google.golang.org/api v0.199.0/go.mod h1:ohG4qSztDJmZdjK/Ar6MhbAmb/Rpi4JHOqagsh90K28= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= @@ -2482,8 +2492,8 @@ google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98/go. 
google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157/go.mod h1:99sLkeliLXfdj2J75X3Ho+rrVCaJze0uwN7zDDkjPVU= -google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed h1:3RgNmBoI9MZhsj3QxC+AP/qQhNwpCLOvYDYYsFrhFt0= -google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed/go.mod h1:OCdP9MfskevB/rbYvHTsXTtKC+3bHWajPdoKgjcYkfo= +google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 h1:hjSy6tcFQZ171igDaN5QHOw2n6vx40juYbC/x67CEhc= +google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I= google.golang.org/genproto/googleapis/bytestream v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:ylj+BE99M198VPbBh6A8d9n3w8fChvyLK3wwBOjXBFA= google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234015-3fc162c6f38a/go.mod h1:xURIpW9ES5+/GZhnV6beoEtxQrnkRGIfP5VQG2tCBLc= google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= @@ -2562,18 +2572,18 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= -k8s.io/api v0.29.3 h1:2ORfZ7+bGC3YJqGpV0KSDDEVf8hdGQ6A03/50vj8pmw= -k8s.io/api v0.29.3/go.mod h1:y2yg2NTyHUUkIoTC+phinTnEa3KFM6RZ3szxt014a80= -k8s.io/apimachinery v0.29.3 h1:2tbx+5L7RNvqJjn7RIuIKu9XTsIZ9Z5wX2G22XAa5EU= -k8s.io/apimachinery v0.29.3/go.mod h1:hx/S4V2PNW4OMg3WizRrHutyB5la0iCUbZym+W0EQIU= -k8s.io/client-go v0.29.3 h1:R/zaZbEAxqComZ9FHeQwOh3Y1ZUs7FaHKZdQtIc2WZg= -k8s.io/client-go v0.29.3/go.mod h1:tkDisCvgPfiRpxGnOORfkljmS+UrW+WtXAy2fTvXJB0= +k8s.io/api v0.31.1 h1:Xe1hX/fPW3PXYYv8BlozYqw63ytA92snr96zMW9gWTU= +k8s.io/api v0.31.1/go.mod h1:sbN1g6eY6XVLeqNsZGLnI5FwVseTrZX7Fv3O26rhAaI= +k8s.io/apimachinery v0.31.1 h1:mhcUBbj7KUjaVhyXILglcVjuS4nYXiwC+KKFBgIVy7U= +k8s.io/apimachinery v0.31.1/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/client-go v0.31.1 h1:f0ugtWSbWpxHR7sjVpQwuvw9a3ZKLXX0u0itkFXufb0= +k8s.io/client-go v0.31.1/go.mod h1:sKI8871MJN2OyeqRlmA4W4KM9KBdBUpDLu/43eGemCg= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= +k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= 
modernc.org/cc/v3 v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= diff --git a/pkg/api/handlers.go b/pkg/api/handlers.go index 886301c8659..798f4f0e20d 100644 --- a/pkg/api/handlers.go +++ b/pkg/api/handlers.go @@ -38,6 +38,7 @@ import ( "github.com/grafana/mimir/pkg/usagestats" "github.com/grafana/mimir/pkg/util" "github.com/grafana/mimir/pkg/util/chunkinfologger" + util_log "github.com/grafana/mimir/pkg/util/log" "github.com/grafana/mimir/pkg/util/validation" ) @@ -268,13 +269,15 @@ func NewQuerierHandler( nil, // Only needed for admin APIs. "", // This is for snapshots, which is disabled when admin APIs are disabled. Hence empty. false, // Disable admin APIs. - logger, + util_log.SlogFromGoKit(logger), func(context.Context) v1.RulesRetriever { return &querier.DummyRulesRetriever{} }, 0, 0, 0, // Remote read samples and concurrency limit. false, // Not an agent. regexp.MustCompile(".*"), func() (v1.RuntimeInfo, error) { return v1.RuntimeInfo{}, errors.New("not implemented") }, &v1.PrometheusVersion{}, + nil, + nil, // This is used for the stats API which we should not support. Or find other ways to. prometheus.GathererFunc(func() ([]*dto.MetricFamily, error) { return nil, nil }), reg, diff --git a/pkg/blockbuilder/blockbuilder_test.go b/pkg/blockbuilder/blockbuilder_test.go index ec0d330cbc5..a1649421dc0 100644 --- a/pkg/blockbuilder/blockbuilder_test.go +++ b/pkg/blockbuilder/blockbuilder_test.go @@ -16,6 +16,7 @@ import ( "github.com/grafana/dskit/services" "github.com/prometheus/client_golang/prometheus" promtest "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/tsdb" "github.com/stretchr/testify/assert" @@ -164,7 +165,7 @@ func TestBlockBuilder_StartWithExistingCommit(t *testing.T) { expSamples := producedSamples[1+(len(producedSamples)/2):] bucketDir := path.Join(cfg.BlocksStorage.Bucket.Filesystem.Directory, "1") - db, err := tsdb.Open(bucketDir, log.NewNopLogger(), nil, nil, nil) + db, err := tsdb.Open(bucketDir, promslog.NewNopLogger(), nil, nil, nil) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, db.Close()) }) compareQuery(t, @@ -291,7 +292,7 @@ func TestBlockBuilder_ReachHighWatermarkBeforeLastCycleSection(t *testing.T) { `), "cortex_blockbuilder_consumer_lag_records")) bucketDir := path.Join(cfg.BlocksStorage.Bucket.Filesystem.Directory, "1") - db, err := tsdb.Open(bucketDir, log.NewNopLogger(), nil, nil, nil) + db, err := tsdb.Open(bucketDir, promslog.NewNopLogger(), nil, nil, nil) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, db.Close()) }) compareQuery(t, @@ -351,7 +352,7 @@ func TestBlockBuilder_WithMultipleTenants(t *testing.T) { for _, tenant := range tenants { bucketDir := path.Join(cfg.BlocksStorage.Bucket.Filesystem.Directory, tenant) - db, err := tsdb.Open(bucketDir, log.NewNopLogger(), nil, nil, nil) + db, err := tsdb.Open(bucketDir, promslog.NewNopLogger(), nil, nil, nil) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, db.Close()) }) @@ -446,7 +447,7 @@ func TestBlockBuilder_WithNonMonotonicRecordTimestamps(t *testing.T) { require.NoError(t, bb.nextConsumeCycle(ctx, end)) bucketDir := path.Join(cfg.BlocksStorage.Bucket.Filesystem.Directory, tenantID) - db, err := tsdb.Open(bucketDir, log.NewNopLogger(), nil, nil, nil) + db, err := tsdb.Open(bucketDir, promslog.NewNopLogger(), nil, nil, nil) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, db.Close()) }) @@ -517,7 
+518,7 @@ func TestBlockBuilder_RetryOnTransientErrors(t *testing.T) { require.Eventually(t, func() bool { return kafkaCommits.Load() >= 1 }, 50*time.Second, 100*time.Millisecond, "expected kafka commits") bucketDir := path.Join(cfg.BlocksStorage.Bucket.Filesystem.Directory, "1") - db, err := tsdb.Open(bucketDir, log.NewNopLogger(), nil, nil, nil) + db, err := tsdb.Open(bucketDir, promslog.NewNopLogger(), nil, nil, nil) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, db.Close()) }) compareQuery(t, diff --git a/pkg/blockbuilder/tsdb.go b/pkg/blockbuilder/tsdb.go index ab865f39a5e..feb5f8dbdd5 100644 --- a/pkg/blockbuilder/tsdb.go +++ b/pkg/blockbuilder/tsdb.go @@ -256,7 +256,7 @@ func (b *TSDBBuilder) newTSDB(tenant tsdbTenant) (*userTSDB, error) { userID := tenant.tenantID userLogger := util_log.WithUserID(userID, b.logger) - db, err := tsdb.Open(udir, userLogger, nil, &tsdb.Options{ + db, err := tsdb.Open(udir, util_log.SlogFromGoKit(userLogger), nil, &tsdb.Options{ RetentionDuration: 0, MinBlockDuration: 2 * time.Hour.Milliseconds(), MaxBlockDuration: 2 * time.Hour.Milliseconds(), diff --git a/pkg/blockbuilder/tsdb_test.go b/pkg/blockbuilder/tsdb_test.go index 05c0e3f6e77..6d6f9eb97a0 100644 --- a/pkg/blockbuilder/tsdb_test.go +++ b/pkg/blockbuilder/tsdb_test.go @@ -19,6 +19,7 @@ import ( "github.com/grafana/dskit/flagext" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/tsdb" "github.com/prometheus/prometheus/tsdb/chunkenc" @@ -265,7 +266,7 @@ func TestTSDBBuilder(t *testing.T) { require.NoError(t, err) require.Nil(t, builder.tsdbs[tenant]) - newDB, err := tsdb.Open(shipperDir, log.NewNopLogger(), nil, nil, nil) + newDB, err := tsdb.Open(shipperDir, promslog.NewNopLogger(), nil, nil, nil) require.NoError(t, err) // One for the in-order current range. 
Two for the out-of-order blocks: one for the current range diff --git a/pkg/compactor/bucket_compactor_e2e_test.go b/pkg/compactor/bucket_compactor_e2e_test.go index 88c129ed706..ded4174523d 100644 --- a/pkg/compactor/bucket_compactor_e2e_test.go +++ b/pkg/compactor/bucket_compactor_e2e_test.go @@ -27,6 +27,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" promtest "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/tsdb" "github.com/prometheus/prometheus/tsdb/index" @@ -37,6 +38,7 @@ import ( "golang.org/x/sync/errgroup" "github.com/grafana/mimir/pkg/storage/tsdb/block" + util_log "github.com/grafana/mimir/pkg/util/log" ) func TestSyncer_GarbageCollect_e2e(t *testing.T) { @@ -232,7 +234,7 @@ func TestGroupCompactE2E(t *testing.T) { sy, err := newMetaSyncer(nil, nil, bkt, metaFetcher, duplicateBlocksFilter, blocksMarkedForDeletion) require.NoError(t, err) - comp, err := tsdb.NewLeveledCompactor(ctx, reg, logger, []int64{1000, 3000}, nil, nil) + comp, err := tsdb.NewLeveledCompactor(ctx, reg, util_log.SlogFromGoKit(logger), []int64{1000, 3000}, nil, nil) require.NoError(t, err) planner := NewSplitAndMergePlanner([]int64{1000, 3000}) @@ -690,7 +692,7 @@ func createBlockWithOptions( if err := g.Wait(); err != nil { return id, err } - c, err := tsdb.NewLeveledCompactor(ctx, nil, log.NewNopLogger(), []int64{maxt - mint}, nil, nil) + c, err := tsdb.NewLeveledCompactor(ctx, nil, promslog.NewNopLogger(), []int64{maxt - mint}, nil, nil) if err != nil { return id, errors.Wrap(err, "create compactor") } diff --git a/pkg/compactor/split_merge_compactor.go b/pkg/compactor/split_merge_compactor.go index 154c8454dc4..bfc63ad1431 100644 --- a/pkg/compactor/split_merge_compactor.go +++ b/pkg/compactor/split_merge_compactor.go @@ -8,6 +8,8 @@ import ( "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/prometheus/tsdb" + + util_log "github.com/grafana/mimir/pkg/util/log" ) func splitAndMergeGrouperFactory(_ context.Context, cfg Config, cfgProvider ConfigProvider, userID string, logger log.Logger, _ prometheus.Registerer) Grouper { @@ -21,7 +23,7 @@ func splitAndMergeGrouperFactory(_ context.Context, cfg Config, cfgProvider Conf func splitAndMergeCompactorFactory(ctx context.Context, cfg Config, logger log.Logger, reg prometheus.Registerer) (Compactor, Planner, error) { // We don't need to customise the TSDB compactor so we're just using the Prometheus one. 
- compactor, err := tsdb.NewLeveledCompactor(ctx, reg, logger, cfg.BlockRanges.ToMilliseconds(), nil, nil) + compactor, err := tsdb.NewLeveledCompactor(ctx, reg, util_log.SlogFromGoKit(logger), cfg.BlockRanges.ToMilliseconds(), nil, nil) if err != nil { return nil, nil, err } diff --git a/pkg/compactor/split_merge_compactor_test.go b/pkg/compactor/split_merge_compactor_test.go index 3614a9b5c0f..8f008dc020d 100644 --- a/pkg/compactor/split_merge_compactor_test.go +++ b/pkg/compactor/split_merge_compactor_test.go @@ -28,6 +28,7 @@ import ( "github.com/grafana/mimir/pkg/storage/sharding" mimir_tsdb "github.com/grafana/mimir/pkg/storage/tsdb" "github.com/grafana/mimir/pkg/storage/tsdb/block" + util_log "github.com/grafana/mimir/pkg/util/log" util_test "github.com/grafana/mimir/pkg/util/test" ) @@ -774,7 +775,7 @@ func TestMultitenantCompactor_ShouldGuaranteeSeriesShardingConsistencyOverTheTim for _, actualMeta := range actualMetas { expectedSeriesIDs := expectedSeriesIDByShard[actualMeta.Thanos.Labels[mimir_tsdb.CompactorShardIDExternalLabel]] - b, err := tsdb.OpenBlock(logger, filepath.Join(storageDir, userID, actualMeta.ULID.String()), nil) + b, err := tsdb.OpenBlock(util_log.SlogFromGoKit(logger), filepath.Join(storageDir, userID, actualMeta.ULID.String()), nil) require.NoError(t, err) indexReader, err := b.Index() diff --git a/pkg/distributor/otel.go b/pkg/distributor/otel.go index cfdc7ed17c7..8338e964f9d 100644 --- a/pkg/distributor/otel.go +++ b/pkg/distributor/otel.go @@ -399,7 +399,7 @@ func otelMetricsToTimeseries(ctx context.Context, tenantID string, addSuffixes, _, errs := converter.FromMetrics(ctx, md, otlp.Settings{ AddMetricSuffixes: addSuffixes, EnableCreatedTimestampZeroIngestion: enableCTZeroIngestion, - }, logger) + }, utillog.SlogFromGoKit(logger)) mimirTS := converter.TimeSeries() if errs != nil { dropped := len(multierr.Errors(errs)) diff --git a/pkg/distributor/otlp/helper_generated.go b/pkg/distributor/otlp/helper_generated.go index 7889da3a355..5265b862601 100644 --- a/pkg/distributor/otlp/helper_generated.go +++ b/pkg/distributor/otlp/helper_generated.go @@ -23,6 +23,7 @@ import ( "encoding/hex" "fmt" "log" + "log/slog" "math" "slices" "sort" @@ -30,8 +31,6 @@ import ( "unicode/utf8" "github.com/cespare/xxhash/v2" - gokitlog "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/common/model" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" @@ -247,7 +246,7 @@ func isValidAggregationTemporality(metric pmetric.Metric) bool { // However, work is under way to resolve this shortcoming through a feature called native histograms custom buckets: // https://github.com/prometheus/prometheus/issues/13485. 
func (c *MimirConverter) addHistogramDataPoints(ctx context.Context, dataPoints pmetric.HistogramDataPointSlice, - resource pcommon.Resource, settings Settings, baseName string, logger gokitlog.Logger) error { + resource pcommon.Resource, settings Settings, baseName string, logger *slog.Logger) error { for x := 0; x < dataPoints.Len(); x++ { if err := c.everyN.checkContext(ctx); err != nil { return err @@ -339,7 +338,7 @@ func (c *MimirConverter) addHistogramDataPoints(ctx context.Context, dataPoints labels := createLabels(baseName+createdSuffix, baseLabels) c.addTimeSeriesIfNeeded(labels, startTimestampMs, pt.Timestamp()) } - level.Debug(logger).Log("labels", labelsStringer(createLabels(baseName, baseLabels)), "start_ts", startTimestampMs, "sample_ts", timestamp, "type", "histogram") + logger.Debug("addHistogramDataPoints", "labels", labelsStringer(createLabels(baseName, baseLabels)), "start_ts", startTimestampMs, "sample_ts", timestamp, "type", "histogram") } return nil @@ -361,9 +360,17 @@ func getPromExemplars[T exemplarType](ctx context.Context, everyN *everyNTimes, exemplarRunes := 0 promExemplar := mimirpb.Exemplar{ - Value: exemplar.DoubleValue(), TimestampMs: timestamp.FromTime(exemplar.Timestamp().AsTime()), } + switch exemplar.ValueType() { + case pmetric.ExemplarValueTypeInt: + promExemplar.Value = float64(exemplar.IntValue()) + case pmetric.ExemplarValueTypeDouble: + promExemplar.Value = exemplar.DoubleValue() + default: + return nil, fmt.Errorf("unsupported exemplar value type: %v", exemplar.ValueType()) + } + if traceID := exemplar.TraceID(); !traceID.IsEmpty() { val := hex.EncodeToString(traceID[:]) exemplarRunes += utf8.RuneCountInString(traceIDKey) + utf8.RuneCountInString(val) @@ -445,7 +452,7 @@ func mostRecentTimestampInMetric(metric pmetric.Metric) pcommon.Timestamp { } func (c *MimirConverter) addSummaryDataPoints(ctx context.Context, dataPoints pmetric.SummaryDataPointSlice, resource pcommon.Resource, - settings Settings, baseName string, logger gokitlog.Logger) error { + settings Settings, baseName string, logger *slog.Logger) error { for x := 0; x < dataPoints.Len(); x++ { if err := c.everyN.checkContext(ctx); err != nil { return err @@ -502,7 +509,7 @@ func (c *MimirConverter) addSummaryDataPoints(ctx context.Context, dataPoints pm c.addTimeSeriesIfNeeded(createdLabels, startTimestampMs, pt.Timestamp()) } - level.Debug(logger).Log("labels", labelsStringer(createLabels(baseName, baseLabels)), "start_ts", startTimestampMs, "sample_ts", timestamp, "type", "summary") + logger.Debug("addSummaryDataPoints", "labels", labelsStringer(createLabels(baseName, baseLabels)), "start_ts", startTimestampMs, "sample_ts", timestamp, "type", "summary") } return nil @@ -586,7 +593,7 @@ const defaultIntervalForStartTimestamps = int64(300_000) // make use of its direct support for Created Timestamps instead. // See https://opentelemetry.io/docs/specs/otel/metrics/data-model/#resets-and-gaps to know more about how OTel handles // resets for cumulative metrics.
-func (c *MimirConverter) handleStartTime(startTs, ts int64, labels []mimirpb.LabelAdapter, settings Settings, typ string, value float64, logger gokitlog.Logger) { +func (c *MimirConverter) handleStartTime(startTs, ts int64, labels []mimirpb.LabelAdapter, settings Settings, typ string, value float64, logger *slog.Logger) { if !settings.EnableCreatedTimestampZeroIngestion { return } @@ -608,7 +615,7 @@ func (c *MimirConverter) handleStartTime(startTs, ts int64, labels []mimirpb.Lab return } - level.Debug(logger).Log("msg", "adding zero value at start_ts", "type", typ, "labels", labelsStringer(labels), "start_ts", startTs, "sample_ts", ts, "sample_value", value) + logger.Debug("adding zero value at start_ts", "type", typ, "labels", labelsStringer(labels), "start_ts", startTs, "sample_ts", ts, "sample_value", value) // See https://github.com/prometheus/prometheus/issues/14600 for context. c.addSample(&mimirpb.Sample{TimestampMs: startTs}, labels) @@ -684,10 +691,10 @@ func addResourceTargetInfo(resource pcommon.Resource, settings Settings, timesta return } - ts := convertTimeStamp(timestamp) sample := &mimirpb.Sample{ - Value: float64(1), - TimestampMs: ts, + Value: float64(1), + // convert ns to ms + TimestampMs: convertTimeStamp(timestamp), } converter.addSample(sample, labels) } diff --git a/pkg/distributor/otlp/histograms_generated.go b/pkg/distributor/otlp/histograms_generated.go index 29219fb5de6..09a97758fb9 100644 --- a/pkg/distributor/otlp/histograms_generated.go +++ b/pkg/distributor/otlp/histograms_generated.go @@ -63,7 +63,6 @@ func (c *MimirConverter) addExponentialHistogramDataPoints(ctx context.Context, promName, ) ts, _ := c.getOrCreateTimeSeries(lbls) - ts.Histograms = append(ts.Histograms, histogram) exemplars, err := getPromExemplars[pmetric.ExponentialHistogramDataPoint](ctx, &c.everyN, pt) @@ -76,8 +75,8 @@ func (c *MimirConverter) addExponentialHistogramDataPoints(ctx context.Context, return annots, nil } -// exponentialToNativeHistogram translates OTel Exponential Histogram data point -// to Prometheus Native Histogram. +// exponentialToNativeHistogram translates an OTel Exponential Histogram data point +// to a Prometheus Native Histogram. func exponentialToNativeHistogram(p pmetric.ExponentialHistogramDataPoint) (mimirpb.Histogram, annotations.Annotations, error) { var annots annotations.Annotations scale := p.Scale() diff --git a/pkg/distributor/otlp/metrics_to_prw_generated.go b/pkg/distributor/otlp/metrics_to_prw_generated.go index 89f9facc2dc..ea8526657e6 100644 --- a/pkg/distributor/otlp/metrics_to_prw_generated.go +++ b/pkg/distributor/otlp/metrics_to_prw_generated.go @@ -22,17 +22,18 @@ import ( "context" "errors" "fmt" + "log/slog" "sort" "strings" "time" - "github.com/go-kit/log" - prometheustranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus" - "github.com/prometheus/prometheus/util/annotations" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/multierr" + prometheustranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus" + "github.com/prometheus/prometheus/util/annotations" + "github.com/grafana/mimir/pkg/mimirpb" ) @@ -69,7 +70,7 @@ func NewMimirConverter() *MimirConverter { } // FromMetrics converts pmetric.Metrics to Mimir remote write format. 
-func (c *MimirConverter) FromMetrics(ctx context.Context, md pmetric.Metrics, settings Settings, logger log.Logger) (annots annotations.Annotations, errs error) { +func (c *MimirConverter) FromMetrics(ctx context.Context, md pmetric.Metrics, settings Settings, logger *slog.Logger) (annots annotations.Annotations, errs error) { c.everyN = everyNTimes{n: 128} resourceMetricsSlice := md.ResourceMetrics() for i := 0; i < resourceMetricsSlice.Len(); i++ { diff --git a/pkg/distributor/otlp/number_data_points_generated.go b/pkg/distributor/otlp/number_data_points_generated.go index 4b07e749213..f6e32c3cb8b 100644 --- a/pkg/distributor/otlp/number_data_points_generated.go +++ b/pkg/distributor/otlp/number_data_points_generated.go @@ -20,10 +20,9 @@ package otlp import ( "context" + "log/slog" "math" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/common/model" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" @@ -50,9 +49,9 @@ func (c *MimirConverter) addGaugeNumberDataPoints(ctx context.Context, dataPoint model.MetricNameLabel, name, ) - timestamp := convertTimeStamp(pt.Timestamp()) sample := &mimirpb.Sample{ - TimestampMs: timestamp, + // convert ns to ms + TimestampMs: convertTimeStamp(pt.Timestamp()), } switch pt.ValueType() { case pmetric.NumberDataPointValueTypeInt: @@ -70,7 +69,7 @@ func (c *MimirConverter) addGaugeNumberDataPoint } func (c *MimirConverter) addSumNumberDataPoints(ctx context.Context, dataPoints pmetric.NumberDataPointSlice, - resource pcommon.Resource, metric pmetric.Metric, settings Settings, name string, logger log.Logger) error { + resource pcommon.Resource, metric pmetric.Metric, settings Settings, name string, logger *slog.Logger) error { for x := 0; x < dataPoints.Len(); x++ { if err := c.everyN.checkContext(ctx); err != nil { return err @@ -88,8 +87,8 @@ func (c *MimirConverter) addSumNumberDataPoints(ctx context.Context, dataPoints model.MetricNameLabel, name, ) - sample := &mimirpb.Sample{ + // convert ns to ms TimestampMs: timestamp, } switch pt.ValueType() { @@ -130,7 +129,7 @@ func (c *MimirConverter) addSumNumberDataPoints(ctx context.Context, dataPoints } c.addTimeSeriesIfNeeded(createdLabels, startTimestampMs, pt.Timestamp()) } - level.Debug(logger).Log("labels", labelsStringer(lbls), "start_ts", startTimestampMs, "sample_ts", timestamp, "type", "sum") + logger.Debug("addSumNumberDataPoints", "labels", labelsStringer(lbls), "start_ts", startTimestampMs, "sample_ts", timestamp, "type", "sum") } return nil diff --git a/pkg/frontend/querymiddleware/astmapper/parallel.go b/pkg/frontend/querymiddleware/astmapper/parallel.go index 7414b5cb240..040509b4cce 100644 --- a/pkg/frontend/querymiddleware/astmapper/parallel.go +++ b/pkg/frontend/querymiddleware/astmapper/parallel.go @@ -27,6 +27,8 @@ var NonParallelFuncs = []string{ // The following functions are not safe to parallelize. "absent", "absent_over_time", + // TODO: Find out whether it should be parallelizable.
+ "info", "histogram_quantile", "limitk", "limit_ratio", diff --git a/pkg/frontend/querymiddleware/astmapper/parallel_test.go b/pkg/frontend/querymiddleware/astmapper/parallel_test.go index a961215b1f2..e54122f46aa 100644 --- a/pkg/frontend/querymiddleware/astmapper/parallel_test.go +++ b/pkg/frontend/querymiddleware/astmapper/parallel_test.go @@ -232,6 +232,11 @@ func TestFunctionsWithDefaultsIsUpToDate(t *testing.T) { return } + if f.Name == "info" { + // the info function is variadic but no parameter is a timestamp, so it's not relevant for sharding purposes. + return + } + // Rest of the functions with known defaults are functions with a default time() argument. require.Containsf(t, FuncsWithDefaultTimeArg, name, "Function %q has variable arguments, and it's not in the list of functions with default time() argument.", f.Name) }) diff --git a/pkg/frontend/querymiddleware/cardinality_query_cache_test.go b/pkg/frontend/querymiddleware/cardinality_query_cache_test.go index 89c5f2d6401..b63c0cbb3e6 100644 --- a/pkg/frontend/querymiddleware/cardinality_query_cache_test.go +++ b/pkg/frontend/querymiddleware/cardinality_query_cache_test.go @@ -14,9 +14,10 @@ import ( "github.com/grafana/dskit/cache" "github.com/grafana/dskit/tenant" "github.com/grafana/dskit/user" - "github.com/prometheus/prometheus/util/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + mimirtest "github.com/grafana/mimir/pkg/util/test" ) func TestCardinalityQueryCache_RoundTrip_WithTenantFederation(t *testing.T) { @@ -66,7 +67,7 @@ func TestCardinalityQueryCache_RoundTrip_WithTenantFederation(t *testing.T) { cacheBackend := cache.NewInstrumentedMockCache() limits := multiTenantMockLimits{byTenant: testData.limits} - rt := newCardinalityQueryCacheRoundTripper(cacheBackend, DefaultCacheKeyGenerator{}, limits, downstream, testutil.NewLogger(t), nil) + rt := newCardinalityQueryCacheRoundTripper(cacheBackend, DefaultCacheKeyGenerator{}, limits, downstream, mimirtest.NewTestingLogger(t), nil) res, err := rt.RoundTrip(req) require.NoError(t, err) diff --git a/pkg/frontend/querymiddleware/generic_query_cache_test.go b/pkg/frontend/querymiddleware/generic_query_cache_test.go index eae385bad9f..5b4811afca0 100644 --- a/pkg/frontend/querymiddleware/generic_query_cache_test.go +++ b/pkg/frontend/querymiddleware/generic_query_cache_test.go @@ -18,9 +18,10 @@ import ( "github.com/grafana/dskit/user" "github.com/prometheus/client_golang/prometheus" promtest "github.com/prometheus/client_golang/prometheus/testutil" - "github.com/prometheus/prometheus/util/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + mimirtest "github.com/grafana/mimir/pkg/util/test" ) type newGenericQueryCacheFunc func(cache cache.Cache, splitter CacheKeyGenerator, limits Limits, next http.RoundTripper, logger log.Logger, reg prometheus.Registerer) http.RoundTripper @@ -226,7 +227,7 @@ func testGenericQueryCacheRoundTrip(t *testing.T, newRoundTripper newGenericQuer initialStoreCallsCount := cacheBackend.CountStoreCalls() reg := prometheus.NewPedanticRegistry() - rt := newRoundTripper(cacheBackend, DefaultCacheKeyGenerator{codec: NewPrometheusCodec(reg, 0*time.Minute, formatJSON, nil)}, limits, downstream, testutil.NewLogger(t), reg) + rt := newRoundTripper(cacheBackend, DefaultCacheKeyGenerator{codec: NewPrometheusCodec(reg, 0*time.Minute, formatJSON, nil)}, limits, downstream, mimirtest.NewTestingLogger(t), reg) res, err := rt.RoundTrip(req) require.NoError(t, err) diff --git 
a/pkg/frontend/querymiddleware/querysharding_test.go b/pkg/frontend/querymiddleware/querysharding_test.go index c2809acc394..cb3be32f7ec 100644 --- a/pkg/frontend/querymiddleware/querysharding_test.go +++ b/pkg/frontend/querymiddleware/querysharding_test.go @@ -24,6 +24,7 @@ import ( "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/value" @@ -304,7 +305,7 @@ func TestQuerySharding_Correctness(t *testing.T) { query: ` sum by(unique) (metric_counter) * - on (unique) group_left (group_1) + on (unique) group_left (group_1) avg by (unique, group_1) (metric_counter)`, expectedShardedQueries: 3, }, @@ -1510,7 +1511,7 @@ func TestQuerySharding_ShouldReturnErrorInCorrectFormat(t *testing.T) { var ( engine = newEngine() engineTimeout = promql.NewEngine(promql.EngineOpts{ - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), Reg: nil, MaxSamples: 10e6, Timeout: 50 * time.Millisecond, @@ -1523,7 +1524,7 @@ func TestQuerySharding_ShouldReturnErrorInCorrectFormat(t *testing.T) { }, }) engineSampleLimit = promql.NewEngine(promql.EngineOpts{ - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), Reg: nil, MaxSamples: 1, Timeout: time.Hour, @@ -1937,7 +1938,7 @@ func BenchmarkQuerySharding(b *testing.B) { time.Millisecond / 10, } { engine := promql.NewEngine(promql.EngineOpts{ - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), Reg: nil, MaxSamples: 100000000, Timeout: time.Minute, @@ -2505,7 +2506,7 @@ func (i *seriesIteratorMock) Warnings() annotations.Annotations { // newEngine creates and return a new promql.Engine used for testing. 
func newEngine() *promql.Engine { return promql.NewEngine(promql.EngineOpts{ - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), Reg: nil, MaxSamples: 10e6, Timeout: 1 * time.Hour, diff --git a/pkg/frontend/querymiddleware/roundtrip_test.go b/pkg/frontend/querymiddleware/roundtrip_test.go index 715661ba174..16d168e6b56 100644 --- a/pkg/frontend/querymiddleware/roundtrip_test.go +++ b/pkg/frontend/querymiddleware/roundtrip_test.go @@ -30,6 +30,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/testutil" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/prompb" "github.com/prometheus/prometheus/promql" "github.com/stretchr/testify/assert" @@ -82,7 +83,7 @@ func TestTripperware_RangeQuery(t *testing.T) { newTestPrometheusCodec(), nil, promql.EngineOpts{ - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), Reg: nil, MaxSamples: 1000, Timeout: time.Minute, @@ -135,7 +136,7 @@ func TestTripperware_InstantQuery(t *testing.T) { codec, nil, promql.EngineOpts{ - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), Reg: nil, MaxSamples: 1000, Timeout: time.Minute, @@ -505,7 +506,7 @@ func TestTripperware_Metrics(t *testing.T) { newTestPrometheusCodec(), nil, promql.EngineOpts{ - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), Reg: nil, MaxSamples: 1000, Timeout: time.Minute, @@ -767,7 +768,7 @@ func TestTripperware_RemoteRead(t *testing.T) { newTestPrometheusCodec(), nil, promql.EngineOpts{ - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), Reg: nil, MaxSamples: 1000, Timeout: time.Minute, @@ -902,7 +903,7 @@ func TestTripperware_ShouldSupportReadConsistencyOffsetsInjection(t *testing.T) NewPrometheusCodec(nil, 0, formatJSON, nil), nil, promql.EngineOpts{ - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), Reg: nil, MaxSamples: 1000, Timeout: time.Minute, diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go index 4daf0751ae8..3d13d9d8527 100644 --- a/pkg/ingester/ingester.go +++ b/pkg/ingester/ingester.go @@ -2670,7 +2670,7 @@ func (i *Ingester) createTSDB(userID string, walReplayConcurrency int) (*userTSD oooTW := i.limits.OutOfOrderTimeWindow(userID) // Create a new user database - db, err := tsdb.Open(udir, userLogger, tsdbPromReg, &tsdb.Options{ + db, err := tsdb.Open(udir, util_log.SlogFromGoKit(userLogger), tsdbPromReg, &tsdb.Options{ RetentionDuration: i.cfg.BlocksStorageConfig.TSDB.Retention.Milliseconds(), MinBlockDuration: blockRanges[0], MaxBlockDuration: blockRanges[len(blockRanges)-1], diff --git a/pkg/ingester/ingester_early_compaction_test.go b/pkg/ingester/ingester_early_compaction_test.go index 5cf29b7de99..c12b6bb65dc 100644 --- a/pkg/ingester/ingester_early_compaction_test.go +++ b/pkg/ingester/ingester_early_compaction_test.go @@ -12,13 +12,13 @@ import ( "testing" "time" - "github.com/go-kit/log" "github.com/grafana/dskit/ring" "github.com/grafana/dskit/services" "github.com/grafana/dskit/test" "github.com/grafana/dskit/user" "github.com/oklog/ulid" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/tsdb" "github.com/prometheus/prometheus/tsdb/chunkenc" @@ -692,7 +692,7 @@ func listBlocksInDir(t *testing.T, dir string) (ids []ulid.ULID) { } func readMetricSamplesFromBlockDir(t *testing.T, blockDir string, metricName string) (results model.Matrix) { - block, err := 
tsdb.OpenBlock(log.NewNopLogger(), blockDir, nil) + block, err := tsdb.OpenBlock(promslog.NewNopLogger(), blockDir, nil) require.NoError(t, err) defer func() { require.NoError(t, block.Close()) diff --git a/pkg/ingester/user_tsdb_test.go b/pkg/ingester/user_tsdb_test.go index 8d9d0ac8ce8..ebf024e7112 100644 --- a/pkg/ingester/user_tsdb_test.go +++ b/pkg/ingester/user_tsdb_test.go @@ -9,6 +9,7 @@ import ( "time" "github.com/go-kit/log" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/tsdb" "github.com/stretchr/testify/assert" @@ -198,7 +199,7 @@ func TestNextForcedHeadCompactionRange(t *testing.T) { } func TestGetSeriesCountAndMinLocalLimit(t *testing.T) { - tsdbDB, err := tsdb.Open(t.TempDir(), log.NewNopLogger(), nil, tsdb.DefaultOptions(), nil) + tsdbDB, err := tsdb.Open(t.TempDir(), promslog.NewNopLogger(), nil, tsdb.DefaultOptions(), nil) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, tsdbDB.Close()) diff --git a/pkg/mimirtool/backfill/backfill.go b/pkg/mimirtool/backfill/backfill.go index ff92f7d774b..cf435005be8 100644 --- a/pkg/mimirtool/backfill/backfill.go +++ b/pkg/mimirtool/backfill/backfill.go @@ -15,9 +15,9 @@ import ( "time" "github.com/alecthomas/units" - "github.com/go-kit/log" "github.com/pkg/errors" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/tsdb" @@ -93,7 +93,7 @@ func CreateBlocks(input IteratorCreator, mint, maxt int64, maxSamplesInAppender for t := mint; t <= maxt; t = t + blockDuration/2 { err := func() error { - w, err := tsdb.NewBlockWriter(log.NewNopLogger(), outputDir, blockDuration) + w, err := tsdb.NewBlockWriter(promslog.NewNopLogger(), outputDir, blockDuration) if err != nil { return errors.Wrap(err, "block writer") } diff --git a/pkg/querier/blocks_store_queryable_test.go b/pkg/querier/blocks_store_queryable_test.go index 0ff63176856..70d8ddc488f 100644 --- a/pkg/querier/blocks_store_queryable_test.go +++ b/pkg/querier/blocks_store_queryable_test.go @@ -58,6 +58,7 @@ import ( "github.com/grafana/mimir/pkg/util" "github.com/grafana/mimir/pkg/util/globalerror" "github.com/grafana/mimir/pkg/util/limiter" + util_log "github.com/grafana/mimir/pkg/util/log" "github.com/grafana/mimir/pkg/util/test" ) @@ -2830,7 +2831,7 @@ func TestBlocksStoreQuerier_PromQLExecution(t *testing.T) { defer services.StopAndAwaitTerminated(context.Background(), queryable) // nolint:errcheck engine := promql.NewEngine(promql.EngineOpts{ - Logger: logger, + Logger: util_log.SlogFromGoKit(logger), Timeout: 10 * time.Second, MaxSamples: 1e6, }) diff --git a/pkg/querier/duplicates_test.go b/pkg/querier/duplicates_test.go index 97d8b5598d9..1b3d29e21aa 100644 --- a/pkg/querier/duplicates_test.go +++ b/pkg/querier/duplicates_test.go @@ -11,8 +11,8 @@ import ( "testing" "time" - "github.com/go-kit/log" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql" "github.com/prometheus/prometheus/storage" @@ -94,7 +94,7 @@ func runPromQLAndGetJSONResult(t *testing.T, query string, ts mimirpb.TimeSeries tq := &testQueryable{ts: newTimeSeriesSeriesSet([]mimirpb.TimeSeries{ts})} engine := promql.NewEngine(promql.EngineOpts{ - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), Timeout: 10 * time.Second, MaxSamples: 1e6, }) diff 
--git a/pkg/querier/engine/config.go b/pkg/querier/engine/config.go index 2298e018467..e9fbf896d0a 100644 --- a/pkg/querier/engine/config.go +++ b/pkg/querier/engine/config.go @@ -13,6 +13,7 @@ import ( "github.com/grafana/mimir/pkg/streamingpromql" //lint:ignore faillint streamingpromql is fine "github.com/grafana/mimir/pkg/util/activitytracker" //lint:ignore faillint activitytracker is fine + util_log "github.com/grafana/mimir/pkg/util/log" //lint:ignore faillint log is fine ) // Config holds the PromQL engine config exposed by Mimir. @@ -58,7 +59,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { // to indicate whether the experimental PromQL functions should be enabled. func NewPromQLEngineOptions(cfg Config, activityTracker *activitytracker.ActivityTracker, logger log.Logger, reg prometheus.Registerer) (promql.EngineOpts, streamingpromql.EngineOpts, bool) { commonOpts := promql.EngineOpts{ - Logger: logger, + Logger: util_log.SlogFromGoKit(logger), Reg: reg, ActiveQueryTracker: newQueryTracker(activityTracker), MaxSamples: cfg.MaxSamples, diff --git a/pkg/querier/error_translate_queryable_test.go b/pkg/querier/error_translate_queryable_test.go index 2a580b9b0a2..9c28ce9b025 100644 --- a/pkg/querier/error_translate_queryable_test.go +++ b/pkg/querier/error_translate_queryable_test.go @@ -13,12 +13,12 @@ import ( "testing" "time" - "github.com/go-kit/log" "github.com/grafana/dskit/httpgrpc" "github.com/grafana/dskit/user" "github.com/grafana/regexp" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/promslog" "github.com/prometheus/common/route" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/labels" @@ -135,7 +135,7 @@ func TestApiStatusCodes(t *testing.T) { func createPrometheusAPI(q storage.SampleAndChunkQueryable) *route.Router { engine := promql.NewEngine(promql.EngineOpts{ - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), Reg: nil, ActiveQueryTracker: nil, MaxSamples: 100, @@ -157,13 +157,15 @@ func createPrometheusAPI(q storage.SampleAndChunkQueryable) *route.Router { nil, // Only needed for admin APIs. "", // This is for snapshots, which is disabled when admin APIs are disabled. Hence empty. false, // Disable admin APIs. - log.NewNopLogger(), + promslog.NewNopLogger(), func(context.Context) v1.RulesRetriever { return &DummyRulesRetriever{} }, 0, 0, 0, // Remote read samples and concurrency limit. false, // Not an agent. 
regexp.MustCompile(".*"), func() (v1.RuntimeInfo, error) { return v1.RuntimeInfo{}, errors.New("not implemented") }, &v1.PrometheusVersion{}, + nil, + nil, prometheus.DefaultGatherer, nil, nil, diff --git a/pkg/querier/querier_test.go b/pkg/querier/querier_test.go index 051e9347f9c..f17d420f835 100644 --- a/pkg/querier/querier_test.go +++ b/pkg/querier/querier_test.go @@ -16,6 +16,7 @@ import ( "github.com/grafana/dskit/user" "github.com/pkg/errors" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/promql" @@ -36,6 +37,7 @@ import ( "github.com/grafana/mimir/pkg/querier/stats" "github.com/grafana/mimir/pkg/storage/chunk" "github.com/grafana/mimir/pkg/util" + util_log "github.com/grafana/mimir/pkg/util/log" "github.com/grafana/mimir/pkg/util/spanlogger" "github.com/grafana/mimir/pkg/util/test" "github.com/grafana/mimir/pkg/util/validation" @@ -323,7 +325,7 @@ func TestQuerier_QueryableReturnsChunksOutsideQueriedRange(t *testing.T) { require.NoError(t, err) engine := promql.NewEngine(promql.EngineOpts{ - Logger: logger, + Logger: util_log.SlogFromGoKit(logger), MaxSamples: 1e6, Timeout: 1 * time.Minute, }) @@ -410,7 +412,7 @@ func TestBatchMergeChunks(t *testing.T) { nil) engine := promql.NewEngine(promql.EngineOpts{ - Logger: logger, + Logger: util_log.SlogFromGoKit(logger), MaxSamples: 1e6, Timeout: 1 * time.Minute, }) @@ -482,7 +484,7 @@ func BenchmarkQueryExecute(b *testing.B) { nil) engine := promql.NewEngine(promql.EngineOpts{ - Logger: logger, + Logger: util_log.SlogFromGoKit(logger), MaxSamples: 1e6, Timeout: 1 * time.Minute, }) @@ -626,10 +628,10 @@ func TestQuerier_QueryIngestersWithinConfig(t *testing.T) { } dir := t.TempDir() - queryTracker := promql.NewActiveQueryTracker(dir, 10, log.NewNopLogger()) + queryTracker := promql.NewActiveQueryTracker(dir, 10, promslog.NewNopLogger()) engine := promql.NewEngine(promql.EngineOpts{ - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), ActiveQueryTracker: queryTracker, MaxSamples: 1e6, Timeout: 1 * time.Minute, @@ -698,7 +700,7 @@ func TestQuerier_ValidateQueryTimeRange(t *testing.T) { } engine := promql.NewEngine(promql.EngineOpts{ - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), MaxSamples: 1e6, Timeout: 1 * time.Minute, LookbackDelta: engineLookbackDelta, @@ -791,7 +793,7 @@ func TestQuerier_ValidateQueryTimeRange_MaxQueryLength(t *testing.T) { // Create the PromQL engine to execute the query. engine := promql.NewEngine(promql.EngineOpts{ - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), ActiveQueryTracker: nil, MaxSamples: 1e6, Timeout: 1 * time.Minute, @@ -884,7 +886,7 @@ func TestQuerier_ValidateQueryTimeRange_MaxQueryLookback(t *testing.T) { logger := log.NewNopLogger() // Create the PromQL engine to execute the queries. 
engine := promql.NewEngine(promql.EngineOpts{ - Logger: logger, + Logger: util_log.SlogFromGoKit(logger), ActiveQueryTracker: nil, MaxSamples: 1e6, LookbackDelta: engineLookbackDelta, @@ -1132,11 +1134,11 @@ func TestQuerier_ValidateQueryTimeRange_MaxLabelsQueryRange(t *testing.T) { func testRangeQuery(t testing.TB, queryable storage.Queryable, end model.Time, q query) *promql.Result { dir := t.TempDir() - queryTracker := promql.NewActiveQueryTracker(dir, 10, log.NewNopLogger()) + queryTracker := promql.NewActiveQueryTracker(dir, 10, promslog.NewNopLogger()) from, through, step := time.Unix(0, 0), end.Time(), q.step engine := promql.NewEngine(promql.EngineOpts{ - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), ActiveQueryTracker: queryTracker, MaxSamples: 1e6, Timeout: 1 * time.Minute, @@ -1286,10 +1288,10 @@ func TestQuerier_QueryStoreAfterConfig(t *testing.T) { } dir := t.TempDir() - queryTracker := promql.NewActiveQueryTracker(dir, 10, log.NewNopLogger()) + queryTracker := promql.NewActiveQueryTracker(dir, 10, promslog.NewNopLogger()) engine := promql.NewEngine(promql.EngineOpts{ - Logger: log.NewNopLogger(), + Logger: promslog.NewNopLogger(), ActiveQueryTracker: queryTracker, MaxSamples: 1e6, Timeout: 1 * time.Minute, diff --git a/pkg/ruler/compat.go b/pkg/ruler/compat.go index c7050ac4493..b27e60d47c3 100644 --- a/pkg/ruler/compat.go +++ b/pkg/ruler/compat.go @@ -50,6 +50,9 @@ type PusherAppender struct { userID string } +func (a *PusherAppender) SetOptions(*storage.AppendOptions) { +} + func (a *PusherAppender) Append(_ storage.SeriesRef, l labels.Labels, t int64, v float64) (storage.SeriesRef, error) { a.labels = append(a.labels, mimirpb.FromLabelsToLabelAdapters(l)) a.samples = append(a.samples, mimirpb.Sample{ @@ -83,6 +86,10 @@ func (a *PusherAppender) AppendCTZeroSample(_ storage.SeriesRef, _ labels.Labels return 0, errors.New("CT zero samples are unsupported") } +func (a *PusherAppender) AppendHistogramCTZeroSample(storage.SeriesRef, labels.Labels, int64, int64, *histogram.Histogram, *histogram.FloatHistogram) (storage.SeriesRef, error) { + return 0, errors.New("CT zero samples are unsupported") +} + func (a *PusherAppender) Commit() error { a.totalWrites.Inc() @@ -143,6 +150,9 @@ func (t *PusherAppendable) Appender(ctx context.Context) storage.Appender { type NoopAppender struct{} +func (a *NoopAppender) SetOptions(*storage.AppendOptions) { +} + func (a *NoopAppender) Append(_ storage.SeriesRef, _ labels.Labels, _ int64, _ float64) (storage.SeriesRef, error) { return 0, nil } @@ -163,6 +173,10 @@ func (a *NoopAppender) AppendCTZeroSample(_ storage.SeriesRef, _ labels.Labels, return 0, errors.New("CT zero samples are unsupported") } +func (a *NoopAppender) AppendHistogramCTZeroSample(storage.SeriesRef, labels.Labels, int64, int64, *histogram.Histogram, *histogram.FloatHistogram) (storage.SeriesRef, error) { + return 0, errors.New("CT zero samples are unsupported") +} + func (a *NoopAppender) Commit() error { return nil } @@ -375,7 +389,7 @@ func DefaultTenantManagerFactory( GroupEvaluationContextFunc: FederatedGroupContextFunc, ExternalURL: cfg.ExternalURL.URL, NotifyFunc: rules.SendAlerts(notifier, cfg.ExternalURL.String()), - Logger: log.With(logger, "component", "ruler", "insight", true, "user", userID), + Logger: util_log.SlogFromGoKit(log.With(logger, "component", "ruler", "insight", true, "user", userID)), Registerer: reg, OutageTolerance: cfg.OutageTolerance, ForGracePeriod: cfg.ForGracePeriod, diff --git a/pkg/ruler/compat_test.go 
b/pkg/ruler/compat_test.go index da2327819b7..35be76d9a56 100644 --- a/pkg/ruler/compat_test.go +++ b/pkg/ruler/compat_test.go @@ -22,6 +22,7 @@ import ( "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/client_golang/prometheus/testutil" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/rulefmt" @@ -41,6 +42,7 @@ import ( "github.com/grafana/mimir/pkg/querier/api" "github.com/grafana/mimir/pkg/ruler/rulespb" "github.com/grafana/mimir/pkg/storage/series" + util_log "github.com/grafana/mimir/pkg/util/log" "github.com/grafana/mimir/pkg/util/test" ) @@ -492,11 +494,11 @@ func TestDefaultManagerFactory_CorrectQueryableUsed(t *testing.T) { // setup cfg := defaultRulerConfig(t) options := applyPrepareOptions(t, cfg.Ring.Common.InstanceID) - notifierManager := notifier.NewManager(¬ifier.Options{Do: func(_ context.Context, _ *http.Client, _ *http.Request) (*http.Response, error) { return nil, nil }}, options.logger) + notifierManager := notifier.NewManager(¬ifier.Options{Do: func(_ context.Context, _ *http.Client, _ *http.Request) (*http.Response, error) { return nil, nil }}, util_log.SlogFromGoKit(options.logger)) ruleFiles := writeRuleGroupToFiles(t, cfg.RulePath, options.logger, userID, tc.ruleGroup) regularQueryable, federatedQueryable := newMockQueryable(), newMockQueryable() - tracker := promql.NewActiveQueryTracker(t.TempDir(), 20, log.NewNopLogger()) + tracker := promql.NewActiveQueryTracker(t.TempDir(), 20, promslog.NewNopLogger()) eng := promql.NewEngine(promql.EngineOpts{ MaxSamples: 1e6, ActiveQueryTracker: tracker, @@ -560,10 +562,10 @@ func TestDefaultManagerFactory_ShouldNotWriteRecordingRuleResultsWhenDisabled(t var ( options = applyPrepareOptions(t, cfg.Ring.Common.InstanceID) - notifierManager = notifier.NewManager(¬ifier.Options{Do: func(_ context.Context, _ *http.Client, _ *http.Request) (*http.Response, error) { return nil, nil }}, options.logger) + notifierManager = notifier.NewManager(¬ifier.Options{Do: func(_ context.Context, _ *http.Client, _ *http.Request) (*http.Response, error) { return nil, nil }}, util_log.SlogFromGoKit(options.logger)) ruleFiles = writeRuleGroupToFiles(t, cfg.RulePath, options.logger, userID, ruleGroup) queryable = newMockQueryable() - tracker = promql.NewActiveQueryTracker(t.TempDir(), 20, log.NewNopLogger()) + tracker = promql.NewActiveQueryTracker(t.TempDir(), 20, util_log.SlogFromGoKit(log.NewNopLogger())) eng = promql.NewEngine(promql.EngineOpts{ MaxSamples: 1e6, ActiveQueryTracker: tracker, @@ -648,8 +650,8 @@ func TestDefaultManagerFactory_ShouldInjectReadConsistencyToContextBasedOnRuleDe var ( cfg = defaultRulerConfig(t) options = applyPrepareOptions(t, cfg.Ring.Common.InstanceID) - notifierManager = notifier.NewManager(¬ifier.Options{Do: func(_ context.Context, _ *http.Client, _ *http.Request) (*http.Response, error) { return nil, nil }}, options.logger) - tracker = promql.NewActiveQueryTracker(t.TempDir(), 20, options.logger) + notifierManager = notifier.NewManager(¬ifier.Options{Do: func(_ context.Context, _ *http.Client, _ *http.Request) (*http.Response, error) { return nil, nil }}, util_log.SlogFromGoKit(options.logger)) + tracker = promql.NewActiveQueryTracker(t.TempDir(), 20, util_log.SlogFromGoKit(options.logger)) eng = promql.NewEngine(promql.EngineOpts{ MaxSamples: 1e6, ActiveQueryTracker: tracker, @@ -727,8 +729,8 @@ func 
TestDefaultManagerFactory_ShouldInjectStrongReadConsistencyToContextWhenQue var ( options = applyPrepareOptions(t, cfg.Ring.Common.InstanceID) - notifierManager = notifier.NewManager(¬ifier.Options{Do: func(_ context.Context, _ *http.Client, _ *http.Request) (*http.Response, error) { return nil, nil }}, options.logger) - tracker = promql.NewActiveQueryTracker(t.TempDir(), 20, options.logger) + notifierManager = notifier.NewManager(¬ifier.Options{Do: func(_ context.Context, _ *http.Client, _ *http.Request) (*http.Response, error) { return nil, nil }}, util_log.SlogFromGoKit(options.logger)) + tracker = promql.NewActiveQueryTracker(t.TempDir(), 20, util_log.SlogFromGoKit(options.logger)) eng = promql.NewEngine(promql.EngineOpts{ MaxSamples: 1e6, ActiveQueryTracker: tracker, diff --git a/pkg/ruler/notifier.go b/pkg/ruler/notifier.go index f8447aef9e7..17eaacb91ae 100644 --- a/pkg/ruler/notifier.go +++ b/pkg/ruler/notifier.go @@ -25,6 +25,7 @@ import ( "github.com/prometheus/prometheus/notifier" "github.com/grafana/mimir/pkg/util" + util_log "github.com/grafana/mimir/pkg/util/log" ) var errRulerNotifierStopped = cancellation.NewErrorf("rulerNotifier stopped") @@ -58,10 +59,11 @@ func newRulerNotifier(o *notifier.Options, l gklog.Logger) (*rulerNotifier, erro if err != nil { return nil, err } + sl := util_log.SlogFromGoKit(l) return &rulerNotifier{ - notifier: notifier.NewManager(o, l), + notifier: notifier.NewManager(o, sl), sdCancel: sdCancel, - sdManager: discovery.NewManager(sdCtx, l, o.Registerer, sdMetrics), + sdManager: discovery.NewManager(sdCtx, sl, o.Registerer, sdMetrics), logger: l, }, nil } diff --git a/pkg/ruler/ruler_test.go b/pkg/ruler/ruler_test.go index ff52af87bf1..390d147295e 100644 --- a/pkg/ruler/ruler_test.go +++ b/pkg/ruler/ruler_test.go @@ -38,7 +38,6 @@ import ( "github.com/prometheus/prometheus/rules" promRules "github.com/prometheus/prometheus/rules" "github.com/prometheus/prometheus/storage" - "github.com/prometheus/prometheus/util/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/thanos-io/objstore" @@ -141,7 +140,7 @@ type prepareOptions struct { } func applyPrepareOptions(t *testing.T, instanceID string, opts ...prepareOption) prepareOptions { - defaultLogger := testutil.NewLogger(t) + defaultLogger := log.Logger(utiltest.NewTestingLogger(t)) defaultLogger = log.With(defaultLogger, "instance", instanceID) defaultLogger = level.NewFilter(defaultLogger, level.AllowInfo()) diff --git a/pkg/storage/ingest/reader_test.go b/pkg/storage/ingest/reader_test.go index 32a0cabc150..0f91038f244 100644 --- a/pkg/storage/ingest/reader_test.go +++ b/pkg/storage/ingest/reader_test.go @@ -18,7 +18,6 @@ import ( "github.com/grafana/dskit/test" "github.com/prometheus/client_golang/prometheus" promtest "github.com/prometheus/client_golang/prometheus/testutil" - "github.com/prometheus/prometheus/util/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/twmb/franz-go/pkg/kadm" @@ -314,7 +313,7 @@ func TestPartitionReader_WaitReadConsistencyUntilLastProducedOffset_And_WaitRead # TYPE cortex_ingest_storage_strong_consistency_requests_total counter cortex_ingest_storage_strong_consistency_requests_total{component="partition-reader", with_offset="%t"} 1 cortex_ingest_storage_strong_consistency_requests_total{component="partition-reader", with_offset="%t"} 0 - + # HELP cortex_ingest_storage_strong_consistency_failures_total Total number of failures while waiting for strong consistency to be enforced. 
# TYPE cortex_ingest_storage_strong_consistency_failures_total counter cortex_ingest_storage_strong_consistency_failures_total{component="partition-reader"} 0 @@ -364,7 +363,7 @@ func TestPartitionReader_WaitReadConsistencyUntilLastProducedOffset_And_WaitRead # TYPE cortex_ingest_storage_strong_consistency_requests_total counter cortex_ingest_storage_strong_consistency_requests_total{component="partition-reader", with_offset="%t"} 1 cortex_ingest_storage_strong_consistency_requests_total{component="partition-reader", with_offset="%t"} 0 - + # HELP cortex_ingest_storage_strong_consistency_failures_total Total number of failures while waiting for strong consistency to be enforced. # TYPE cortex_ingest_storage_strong_consistency_failures_total counter cortex_ingest_storage_strong_consistency_failures_total{component="partition-reader"} 1 @@ -417,7 +416,7 @@ func TestPartitionReader_WaitReadConsistencyUntilLastProducedOffset_And_WaitRead # TYPE cortex_ingest_storage_strong_consistency_requests_total counter cortex_ingest_storage_strong_consistency_requests_total{component="partition-reader", with_offset="%t"} 1 cortex_ingest_storage_strong_consistency_requests_total{component="partition-reader", with_offset="%t"} 0 - + # HELP cortex_ingest_storage_strong_consistency_failures_total Total number of failures while waiting for strong consistency to be enforced. # TYPE cortex_ingest_storage_strong_consistency_failures_total counter cortex_ingest_storage_strong_consistency_failures_total{component="partition-reader"} 1 @@ -454,7 +453,7 @@ func TestPartitionReader_WaitReadConsistencyUntilLastProducedOffset_And_WaitRead # TYPE cortex_ingest_storage_strong_consistency_requests_total counter cortex_ingest_storage_strong_consistency_requests_total{component="partition-reader", with_offset="%t"} 1 cortex_ingest_storage_strong_consistency_requests_total{component="partition-reader", with_offset="%t"} 0 - + # HELP cortex_ingest_storage_strong_consistency_failures_total Total number of failures while waiting for strong consistency to be enforced. # TYPE cortex_ingest_storage_strong_consistency_failures_total counter cortex_ingest_storage_strong_consistency_failures_total{component="partition-reader"} 0 @@ -2063,7 +2062,7 @@ func TestPartitionCommitter(t *testing.T) { return res, nil, true }) - logger := testutil.NewLogger(t) + logger := mimirtest.NewTestingLogger(t) cfg := createTestKafkaConfig(clusterAddr, topicName) client, err := kgo.NewClient(commonKafkaClientOptions(cfg, nil, logger)...) require.NoError(t, err) @@ -2204,7 +2203,7 @@ func TestPartitionCommitter_commit(t *testing.T) { func newKafkaProduceClient(t *testing.T, addrs string) *kgo.Client { writeClient, err := kgo.NewClient( kgo.SeedBrokers(addrs), - kgo.WithLogger(NewKafkaLogger(testutil.NewLogger(t))), + kgo.WithLogger(NewKafkaLogger(mimirtest.NewTestingLogger(t))), // We will choose the partition of each record. 
kgo.RecordPartitioner(kgo.ManualPartitioner()), ) diff --git a/pkg/storage/tsdb/block/block_generator.go b/pkg/storage/tsdb/block/block_generator.go index d88ab990c4e..752bbdb3b8a 100644 --- a/pkg/storage/tsdb/block/block_generator.go +++ b/pkg/storage/tsdb/block/block_generator.go @@ -19,6 +19,7 @@ import ( "github.com/grafana/dskit/runutil" "github.com/oklog/ulid" "github.com/pkg/errors" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb" @@ -273,7 +274,7 @@ func CreateBlock( if err := g.Wait(); err != nil { return id, err } - c, err := tsdb.NewLeveledCompactor(ctx, nil, log.NewNopLogger(), []int64{maxt - mint}, nil, nil) + c, err := tsdb.NewLeveledCompactor(ctx, nil, promslog.NewNopLogger(), []int64{maxt - mint}, nil, nil) if err != nil { return id, errors.Wrap(err, "create compactor") } diff --git a/pkg/storage/tsdb/block/index.go b/pkg/storage/tsdb/block/index.go index ddc08435ed9..c81c99f230f 100644 --- a/pkg/storage/tsdb/block/index.go +++ b/pkg/storage/tsdb/block/index.go @@ -29,6 +29,8 @@ import ( "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/index" "golang.org/x/exp/slices" + + util_log "github.com/grafana/mimir/pkg/util/log" ) // VerifyBlock does a full run over a block index and chunk data and verifies that they fulfill the order invariants. @@ -421,7 +423,7 @@ func Repair(ctx context.Context, logger log.Logger, dir string, id ulid.ULID, so return resid, errors.New("cannot repair downsampled block") } - b, err := tsdb.OpenBlock(logger, bdir, nil) + b, err := tsdb.OpenBlock(util_log.SlogFromGoKit(logger), bdir, nil) if err != nil { return resid, errors.Wrap(err, "open block") } diff --git a/pkg/storegateway/bucket_stores_test.go b/pkg/storegateway/bucket_stores_test.go index 8647a093e10..f74a637070f 100644 --- a/pkg/storegateway/bucket_stores_test.go +++ b/pkg/storegateway/bucket_stores_test.go @@ -25,6 +25,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/tsdb" @@ -651,7 +652,7 @@ func generateStorageBlock(t *testing.T, storageDir, userID string, metricName st // then it will be snapshotted to the storage directory. 
tmpDir := t.TempDir() - db, err := tsdb.Open(tmpDir, log.NewNopLogger(), nil, tsdb.DefaultOptions(), nil) + db, err := tsdb.Open(tmpDir, promslog.NewNopLogger(), nil, tsdb.DefaultOptions(), nil) require.NoError(t, err) defer func() { require.NoError(t, db.Close()) diff --git a/pkg/storegateway/bucket_test.go b/pkg/storegateway/bucket_test.go index 98d658d38ef..9b3bb195bb6 100644 --- a/pkg/storegateway/bucket_test.go +++ b/pkg/storegateway/bucket_test.go @@ -37,6 +37,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" promtest "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb" @@ -1149,7 +1150,7 @@ func appendTestSeries(series int) func(testing.TB, func() storage.Appender) { func createBlockFromHead(t testing.TB, dir string, head *tsdb.Head) ulid.ULID { // Put a 3 MiB limit on segment files so we can test with many segment files without creating too big blocks. - compactor, err := tsdb.NewLeveledCompactorWithChunkSize(context.Background(), nil, log.NewNopLogger(), []int64{1000000}, nil, 3*1024*1024, nil) + compactor, err := tsdb.NewLeveledCompactorWithChunkSize(context.Background(), nil, promslog.NewNopLogger(), []int64{1000000}, nil, 3*1024*1024, nil) assert.NoError(t, err) assert.NoError(t, os.MkdirAll(dir, 0777)) diff --git a/pkg/storegateway/prometheus_test.go b/pkg/storegateway/prometheus_test.go index e8a225ec215..ac3f5db2653 100644 --- a/pkg/storegateway/prometheus_test.go +++ b/pkg/storegateway/prometheus_test.go @@ -6,7 +6,7 @@ import ( "context" "testing" - "github.com/go-kit/log" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/model/labels" promtsdb "github.com/prometheus/prometheus/tsdb" "github.com/prometheus/prometheus/tsdb/chunks" @@ -18,7 +18,7 @@ import ( ) func openPromBlocks(t testing.TB, dir string) []promtsdb.BlockReader { - promDB, err := promtsdb.OpenDBReadOnly(dir, "", log.NewNopLogger()) + promDB, err := promtsdb.OpenDBReadOnly(dir, "", promslog.NewNopLogger()) require.NoError(t, err) promBlocks, err := promDB.Blocks() require.NoError(t, err) diff --git a/pkg/util/log/log.go b/pkg/util/log/log.go index caa398f7be9..d5b9614fd83 100644 --- a/pkg/util/log/log.go +++ b/pkg/util/log/log.go @@ -54,21 +54,49 @@ func InitLogger(logFormat string, logLevel dslog.Level, buffered bool, rateLimit return logger } +type logLevel int + +const ( + debugLevel logLevel = iota + infoLevel + warnLevel + errorLevel +) + +type leveledLogger interface { + level() logLevel +} + +var _ leveledLogger = levelFilter{} + // Pass through Logger and implement the DebugEnabled interface that spanlogger looks for. type levelFilter struct { log.Logger - debugEnabled bool + lvl dslog.Level } func newFilter(logger log.Logger, lvl dslog.Level) log.Logger { return &levelFilter{ - Logger: level.NewFilter(logger, lvl.Option), - debugEnabled: lvl.String() == "debug", // Using inside knowledge about the hierarchy of possible options. 
+ Logger: level.NewFilter(logger, lvl.Option), + lvl: lvl, + } +} + +func (f levelFilter) level() logLevel { + switch f.lvl.String() { + case "info": + return infoLevel + case "warn": + return warnLevel + case "error": + return errorLevel + default: + return debugLevel } } func (f *levelFilter) DebugEnabled() bool { - return f.debugEnabled + return f.level() <= debugLevel } func getWriter(buffered bool) io.Writer { diff --git a/pkg/util/log/slogadapter.go b/pkg/util/log/slogadapter.go new file mode 100644 index 00000000000..9653ed2bdfb --- /dev/null +++ b/pkg/util/log/slogadapter.go @@ -0,0 +1,103 @@ +// SPDX-License-Identifier: AGPL-3.0-only + +package log + +import ( + "context" + "log/slog" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" +) + +// SlogFromGoKit returns an slog adapter for the given go-kit logger. +func SlogFromGoKit(logger log.Logger) *slog.Logger { + return slog.New(goKitHandler{logger: logger}) +} + +var _ slog.Handler = goKitHandler{} + +type group struct { + name string + attrs []slog.Attr +} + +type goKitHandler struct { + logger log.Logger + group string +} + +func (h goKitHandler) Enabled(_ context.Context, level slog.Level) bool { + x, ok := h.logger.(leveledLogger) + if !ok { + return true + } + + l := x.level() + switch level { + case slog.LevelInfo: + return l <= infoLevel + case slog.LevelWarn: + return l <= warnLevel + case slog.LevelError: + return l <= errorLevel + default: + return l <= debugLevel + } +} + +func (h goKitHandler) Handle(_ context.Context, record slog.Record) error { + var logger log.Logger + switch record.Level { + case slog.LevelInfo: + logger = level.Info(h.logger) + case slog.LevelWarn: + logger = level.Warn(h.logger) + case slog.LevelError: + logger = level.Error(h.logger) + default: + logger = level.Debug(h.logger) + } + + if h.group == "" { + pairs := make([]any, 0, record.NumAttrs()+2) + pairs = append(pairs, "msg", record.Message) + record.Attrs(func(attr slog.Attr) bool { + pairs = append(pairs, attr.Key, attr.Value) + return true + }) + return logger.Log(pairs...) + } + + pairs := make([]any, 0, record.NumAttrs()) + record.Attrs(func(attr slog.Attr) bool { + pairs = append(pairs, attr.Key, attr.Value) + return true + }) + g := slog.Group(h.group, pairs...)
+ return goKitHandler{logger: log.With(h.logger, g.Key, g.Value)} +} + +func (h goKitHandler) WithGroup(name string) slog.Handler { + if name == "" { + return h + } + + h.group = name + return h +} diff --git a/pkg/util/log/slogadapter_test.go b/pkg/util/log/slogadapter_test.go new file mode 100644 index 00000000000..66a6d830f37 --- /dev/null +++ b/pkg/util/log/slogadapter_test.go @@ -0,0 +1,111 @@ +// SPDX-License-Identifier: AGPL-3.0-only + +package log + +import ( + "log/slog" + "testing" + + "github.com/go-kit/log/level" + dslog "github.com/grafana/dskit/log" +) + +func TestSlogFromGoKit(t *testing.T) { + var lvl dslog.Level + lvl.Set("debug") + mLogger := &mockLogger{} + logger := newFilter(mLogger, lvl) + slogger := SlogFromGoKit(logger) + + levels := []level.Value{ + level.DebugValue(), + level.InfoValue(), + level.WarnValue(), + level.ErrorValue(), + } + + for _, l := range levels { + mLogger.On("Log", level.Key(), l, "msg", "test", "attr", slog.StringValue("value")).Times(1).Return(nil) + } + + for _, l := range levels { + attrs := []any{"attr", "value"} + switch l { + case level.DebugValue(): + slogger.Debug("test", attrs...) + case level.InfoValue(): + slogger.Info("test", attrs...) + case level.WarnValue(): + slogger.Warn("test", attrs...) + case level.ErrorValue(): + slogger.Error("test", attrs...) + } + } + + t.Run("warn level", func(t *testing.T) { + var lvl dslog.Level + lvl.Set("warn") + mLogger := &mockLogger{} + logger := newFilter(mLogger, lvl) + slogger := SlogFromGoKit(logger) + + levels := []level.Value{ + level.DebugValue(), + level.InfoValue(), + level.WarnValue(), + level.ErrorValue(), + } + + mLogger.On("Log", level.Key(), level.WarnValue(), "msg", "test", "attr", slog.StringValue("value")).Times(1).Return(nil) + mLogger.On("Log", level.Key(), level.ErrorValue(), "msg", "test", "attr", slog.StringValue("value")).Times(1).Return(nil) + + for _, l := range levels { + attrs := []any{"attr", "value"} + switch l { + case level.DebugValue(): + slogger.Debug("test", attrs...) + case level.InfoValue(): + slogger.Info("test", attrs...) + case level.WarnValue(): + slogger.Warn("test", attrs...) + case level.ErrorValue(): + slogger.Error("test", attrs...) 
+ } + } + }) + + t.Run("with attrs", func(t *testing.T) { + var lvl dslog.Level + lvl.Set("debug") + mLogger := &mockLogger{} + logger := newFilter(mLogger, lvl) + slogger := SlogFromGoKit(logger).With("extra", "attr") + + mLogger.On( + "Log", + level.Key(), level.DebugValue(), + "extra", slog.StringValue("attr"), + "msg", "test", "attr", slog.StringValue("value"), + ).Times(1).Return(nil) + + slogger.Debug("test", "attr", "value") + }) + + t.Run("with non-empty group", func(t *testing.T) { + var lvl dslog.Level + lvl.Set("debug") + mLogger := &mockLogger{} + logger := newFilter(mLogger, lvl) + slogger := SlogFromGoKit(logger) + + g := slog.Group("test-group", "attr", slog.StringValue("value")) + mLogger.On( + "Log", + level.Key(), level.DebugValue(), + "msg", "test", + "test-group", g.Value, + ).Times(1).Return(nil) + + slogger.WithGroup("test-group").Debug("test", "attr", "value") + }) +} diff --git a/tools/splitblocks/main_test.go b/tools/splitblocks/main_test.go index 4b60dc3192b..e3668fd68fb 100644 --- a/tools/splitblocks/main_test.go +++ b/tools/splitblocks/main_test.go @@ -12,6 +12,7 @@ import ( "github.com/go-kit/log" "github.com/grafana/dskit/runutil" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/tsdb" @@ -200,7 +201,7 @@ func buildSeriesSpec(startOfDay time.Time) []*block.SeriesSpec { } func listSeriesAndChunksFromBlock(t *testing.T, blockDir string) []*block.SeriesSpec { - blk, err := tsdb.OpenBlock(log.NewNopLogger(), blockDir, nil) + blk, err := tsdb.OpenBlock(promslog.NewNopLogger(), blockDir, nil) require.NoError(t, err) chunkReader, err := blk.Chunks() require.NoError(t, err) diff --git a/tools/tsdb-compact/main.go b/tools/tsdb-compact/main.go index e04bd6d45cb..efcad971f9c 100644 --- a/tools/tsdb-compact/main.go +++ b/tools/tsdb-compact/main.go @@ -15,6 +15,8 @@ import ( golog "github.com/go-kit/log" "github.com/grafana/dskit/flagext" "github.com/prometheus/prometheus/tsdb" + + util_log "github.com/grafana/mimir/pkg/util/log" ) func main() { @@ -55,7 +57,7 @@ func main() { blockDirs = append(blockDirs, d) - b, err := tsdb.OpenBlock(logger, d, nil) + b, err := tsdb.OpenBlock(util_log.SlogFromGoKit(logger), d, nil) if err != nil { log.Fatalln("failed to open block:", d, err) } @@ -86,7 +88,7 @@ func main() { ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM) defer cancel() - c, err := tsdb.NewLeveledCompactorWithChunkSize(ctx, nil, logger, []int64{0}, nil, segmentSizeMB*1024*1024, nil) + c, err := tsdb.NewLeveledCompactorWithChunkSize(ctx, nil, util_log.SlogFromGoKit(logger), []int64{0}, nil, segmentSizeMB*1024*1024, nil) if err != nil { log.Fatalln("creating compator", err) } diff --git a/tools/tsdb-gaps/main.go b/tools/tsdb-gaps/main.go index d91865b4380..eba3f51b31a 100644 --- a/tools/tsdb-gaps/main.go +++ b/tools/tsdb-gaps/main.go @@ -18,6 +18,8 @@ import ( "github.com/prometheus/prometheus/tsdb" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" + + util_log "github.com/grafana/mimir/pkg/util/log" ) const ( @@ -213,7 +215,7 @@ func main() { func analyzeBlockForGaps(ctx context.Context, cfg config, blockDir string, matchers []*labels.Matcher) (blockGapStats, error) { var blockStats blockGapStats blockStats.BlockID = blockDir - b, err := tsdb.OpenBlock(logger, blockDir, nil) + b, err := tsdb.OpenBlock(util_log.SlogFromGoKit(logger), blockDir, nil) if err != nil 
{ return blockStats, fmt.Errorf("failed to open block: %w", err) } diff --git a/tools/tsdb-print-chunk/main.go b/tools/tsdb-print-chunk/main.go index 7e860a0df2b..2db173031f5 100644 --- a/tools/tsdb-print-chunk/main.go +++ b/tools/tsdb-print-chunk/main.go @@ -14,6 +14,8 @@ import ( "github.com/prometheus/prometheus/tsdb" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" + + util_log "github.com/grafana/mimir/pkg/util/log" ) var logger = log.NewLogfmtLogger(os.Stderr) @@ -30,7 +32,7 @@ func main() { } func printChunks(blockDir string, chunkRefs []string) { - b, err := tsdb.OpenBlock(logger, blockDir, nil) + b, err := tsdb.OpenBlock(util_log.SlogFromGoKit(logger), blockDir, nil) if err != nil { fmt.Fprintln(os.Stderr, "Failed to open TSDB block", blockDir, "due to error:", err) os.Exit(1) diff --git a/tools/tsdb-series/main.go b/tools/tsdb-series/main.go index 3bfc0469036..2650042788f 100644 --- a/tools/tsdb-series/main.go +++ b/tools/tsdb-series/main.go @@ -21,6 +21,8 @@ import ( "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/index" + + util_log "github.com/grafana/mimir/pkg/util/log" ) var logger = log.NewLogfmtLogger(os.Stderr) @@ -104,7 +106,7 @@ type seriesWithStats struct { } func printBlockIndex(ctx context.Context, blockDir string, printChunks bool, seriesStats bool, matchers []*labels.Matcher, minTime time.Time, maxTime time.Time, jsonOutput bool) { - block, err := tsdb.OpenBlock(logger, blockDir, nil) + block, err := tsdb.OpenBlock(util_log.SlogFromGoKit(logger), blockDir, nil) if err != nil { level.Error(logger).Log("msg", "failed to open block", "dir", blockDir, "err", err) return diff --git a/tools/tsdb-symbols/main.go b/tools/tsdb-symbols/main.go index a4d9979f1e1..fabdbc057d1 100644 --- a/tools/tsdb-symbols/main.go +++ b/tools/tsdb-symbols/main.go @@ -9,11 +9,11 @@ import ( "fmt" "io" "log" + "log/slog" "os" "path/filepath" "time" - gokitlog "github.com/go-kit/log" "github.com/grafana/dskit/flagext" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/tsdb" @@ -90,7 +90,7 @@ func main() { } func analyseSymbols(ctx context.Context, blockDir string, uniqueSymbols map[string]struct{}, uniqueSymbolsPerShard []map[string]struct{}) error { - block, err := tsdb.OpenBlock(gokitlog.NewLogfmtLogger(os.Stderr), blockDir, nil) + block, err := tsdb.OpenBlock(slog.New(slog.NewTextHandler(os.Stderr, nil)), blockDir, nil) if err != nil { return fmt.Errorf("failed to open block: %v", err) } diff --git a/vendor/cloud.google.com/go/auth/CHANGES.md b/vendor/cloud.google.com/go/auth/CHANGES.md index 5584c350b0a..dcdafb0a47f 100644 --- a/vendor/cloud.google.com/go/auth/CHANGES.md +++ b/vendor/cloud.google.com/go/auth/CHANGES.md @@ -1,5 +1,21 @@ # Changelog +## [0.9.5](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.4...auth/v0.9.5) (2024-09-25) + + +### Bug Fixes + +* **auth:** Restore support for GOOGLE_CLOUD_UNIVERSE_DOMAIN env ([#10915](https://github.com/googleapis/google-cloud-go/issues/10915)) ([94caaaa](https://github.com/googleapis/google-cloud-go/commit/94caaaa061362d0e00ef6214afcc8a0a3e7ebfb2)) +* **auth:** Skip directpath credentials overwrite when it's not on GCE ([#10833](https://github.com/googleapis/google-cloud-go/issues/10833)) ([7e5e8d1](https://github.com/googleapis/google-cloud-go/commit/7e5e8d10b761b0a6e43e19a028528db361bc07b1)) +* **auth:** Use new context for non-blocking token refresh 
([#10919](https://github.com/googleapis/google-cloud-go/issues/10919)) ([cf7102d](https://github.com/googleapis/google-cloud-go/commit/cf7102d33a21be1e5a9d47a49456b3a57c43b350)) + +## [0.9.4](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.3...auth/v0.9.4) (2024-09-11) + + +### Bug Fixes + +* **auth:** Enable self-signed JWT for non-GDU universe domain ([#10831](https://github.com/googleapis/google-cloud-go/issues/10831)) ([f9869f7](https://github.com/googleapis/google-cloud-go/commit/f9869f7903cfd34d1b97c25d0dc5669d2c5138e6)) + ## [0.9.3](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.2...auth/v0.9.3) (2024-09-03) diff --git a/vendor/cloud.google.com/go/auth/README.md b/vendor/cloud.google.com/go/auth/README.md index 36de276a074..6fe4f0763e3 100644 --- a/vendor/cloud.google.com/go/auth/README.md +++ b/vendor/cloud.google.com/go/auth/README.md @@ -1,4 +1,40 @@ -# auth +# Google Auth Library for Go -This module is currently EXPERIMENTAL and under active development. It is not -yet intended to be used. +[![Go Reference](https://pkg.go.dev/badge/cloud.google.com/go/auth.svg)](https://pkg.go.dev/cloud.google.com/go/auth) + +## Install + +``` bash +go get cloud.google.com/go/auth@latest +``` + +## Usage + +The most common way this library is used is transitively, by default, from any +of our Go client libraries. + +### Notable use-cases + +- To create a credential directly please see examples in the + [credentials](https://pkg.go.dev/cloud.google.com/go/auth/credentials) + package. +- To create a authenticated HTTP client please see examples in the + [httptransport](https://pkg.go.dev/cloud.google.com/go/auth/httptransport) + package. +- To create a authenticated gRPC connection please see examples in the + [grpctransport](https://pkg.go.dev/cloud.google.com/go/auth/grpctransport) + package. +- To create an ID token please see examples in the + [idtoken](https://pkg.go.dev/cloud.google.com/go/auth/credentials/idtoken) + package. + +## Contributing + +Contributions are welcome. Please, see the +[CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md) +document for details. + +Please note that this project is released with a Contributor Code of Conduct. +By participating in this project you agree to abide by its terms. +See [Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md#contributor-code-of-conduct) +for more information. diff --git a/vendor/cloud.google.com/go/auth/auth.go b/vendor/cloud.google.com/go/auth/auth.go index bc37ea85fb5..32fb058df0c 100644 --- a/vendor/cloud.google.com/go/auth/auth.go +++ b/vendor/cloud.google.com/go/auth/auth.go @@ -328,7 +328,9 @@ func (c *cachedTokenProvider) tokenNonBlocking(ctx context.Context) (*Token, err defer c.mu.Unlock() return c.cachedToken, nil case stale: - c.tokenAsync(ctx) + // Call tokenAsync with a new Context because the user-provided context + // may have a short timeout incompatible with async token refresh. + c.tokenAsync(context.Background()) // Return the stale token immediately to not block customer requests to Cloud services. 
c.mu.Lock() defer c.mu.Unlock() diff --git a/vendor/cloud.google.com/go/auth/credentials/filetypes.go b/vendor/cloud.google.com/go/auth/credentials/filetypes.go index cf56b025a23..6591b181132 100644 --- a/vendor/cloud.google.com/go/auth/credentials/filetypes.go +++ b/vendor/cloud.google.com/go/auth/credentials/filetypes.go @@ -124,8 +124,14 @@ func resolveUniverseDomain(optsUniverseDomain, fileUniverseDomain string) string } func handleServiceAccount(f *credsfile.ServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) { + ud := resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain) if opts.UseSelfSignedJWT { return configureSelfSignedJWT(f, opts) + } else if ud != "" && ud != internalauth.DefaultUniverseDomain { + // For non-GDU universe domains, token exchange is impossible and services + // must support self-signed JWTs. + opts.UseSelfSignedJWT = true + return configureSelfSignedJWT(f, opts) } opts2LO := &auth.Options2LO{ Email: f.ClientEmail, diff --git a/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go b/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go index b62a8ae4d5d..6ae29de6c27 100644 --- a/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go +++ b/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go @@ -17,6 +17,7 @@ package credentials import ( "context" "crypto/rsa" + "errors" "fmt" "strings" "time" @@ -35,6 +36,9 @@ var ( // configureSelfSignedJWT uses the private key in the service account to create // a JWT without making a network call. func configureSelfSignedJWT(f *credsfile.ServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) { + if len(opts.scopes()) == 0 && opts.Audience == "" { + return nil, errors.New("credentials: both scopes and audience are empty") + } pk, err := internal.ParseKey([]byte(f.PrivateKey)) if err != nil { return nil, fmt.Errorf("credentials: could not parse key: %w", err) diff --git a/vendor/cloud.google.com/go/auth/grpctransport/directpath.go b/vendor/cloud.google.com/go/auth/grpctransport/directpath.go index efc91c2b0c3..69c6f27f04e 100644 --- a/vendor/cloud.google.com/go/auth/grpctransport/directpath.go +++ b/vendor/cloud.google.com/go/auth/grpctransport/directpath.go @@ -22,7 +22,7 @@ import ( "strings" "cloud.google.com/go/auth" - "cloud.google.com/go/compute/metadata" + "cloud.google.com/go/auth/internal/compute" "google.golang.org/grpc" grpcgoogle "google.golang.org/grpc/credentials/google" ) @@ -91,7 +91,7 @@ func isDirectPathXdsUsed(o *Options) bool { // configuration allows the use of direct path. If it does not the provided // grpcOpts and endpoint are returned. func configureDirectPath(grpcOpts []grpc.DialOption, opts *Options, endpoint string, creds *auth.Credentials) ([]grpc.DialOption, string) { - if isDirectPathEnabled(endpoint, opts) && metadata.OnGCE() && isTokenProviderDirectPathCompatible(creds, opts) { + if isDirectPathEnabled(endpoint, opts) && compute.OnComputeEngine() && isTokenProviderDirectPathCompatible(creds, opts) { // Overwrite all of the previously specific DialOptions, DirectPath uses its own set of credentials and certificates. 
grpcOpts = []grpc.DialOption{ grpc.WithCredentialsBundle(grpcgoogle.NewDefaultCredentialsWithOptions(grpcgoogle.DefaultCredentialsOptions{PerRPCCreds: &grpcCredentialsProvider{creds: creds}}))} diff --git a/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go b/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go index 21488e29f0b..9d959af74c6 100644 --- a/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go +++ b/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go @@ -22,6 +22,7 @@ import ( "errors" "fmt" "net/http" + "os" "cloud.google.com/go/auth" "cloud.google.com/go/auth/credentials" @@ -330,15 +331,23 @@ type grpcCredentialsProvider struct { clientUniverseDomain string } -// getClientUniverseDomain returns the default service domain for a given Cloud universe. -// The default value is "googleapis.com". This is the universe domain -// configured for the client, which will be compared to the universe domain -// that is separately configured for the credentials. +// getClientUniverseDomain returns the default service domain for a given Cloud +// universe, with the following precedence: +// +// 1. A non-empty option.WithUniverseDomain or similar client option. +// 2. A non-empty environment variable GOOGLE_CLOUD_UNIVERSE_DOMAIN. +// 3. The default value "googleapis.com". +// +// This is the universe domain configured for the client, which will be compared +// to the universe domain that is separately configured for the credentials. func (c *grpcCredentialsProvider) getClientUniverseDomain() string { - if c.clientUniverseDomain == "" { - return internal.DefaultUniverseDomain + if c.clientUniverseDomain != "" { + return c.clientUniverseDomain + } + if envUD := os.Getenv(internal.UniverseDomainEnvVar); envUD != "" { + return envUD } - return c.clientUniverseDomain + return internal.DefaultUniverseDomain } func (c *grpcCredentialsProvider) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { diff --git a/vendor/cloud.google.com/go/auth/httptransport/transport.go b/vendor/cloud.google.com/go/auth/httptransport/transport.go index 274bb01254c..93c0b1369fe 100644 --- a/vendor/cloud.google.com/go/auth/httptransport/transport.go +++ b/vendor/cloud.google.com/go/auth/httptransport/transport.go @@ -19,6 +19,7 @@ import ( "crypto/tls" "net" "net/http" + "os" "time" "cloud.google.com/go/auth" @@ -178,13 +179,23 @@ type authTransport struct { clientUniverseDomain string } -// getClientUniverseDomain returns the universe domain configured for the client. -// The default value is "googleapis.com". +// getClientUniverseDomain returns the default service domain for a given Cloud +// universe, with the following precedence: +// +// 1. A non-empty option.WithUniverseDomain or similar client option. +// 2. A non-empty environment variable GOOGLE_CLOUD_UNIVERSE_DOMAIN. +// 3. The default value "googleapis.com". +// +// This is the universe domain configured for the client, which will be compared +// to the universe domain that is separately configured for the credentials. 
func (t *authTransport) getClientUniverseDomain() string { - if t.clientUniverseDomain == "" { - return internal.DefaultUniverseDomain + if t.clientUniverseDomain != "" { + return t.clientUniverseDomain + } + if envUD := os.Getenv(internal.UniverseDomainEnvVar); envUD != "" { + return envUD } - return t.clientUniverseDomain + return internal.DefaultUniverseDomain } // RoundTrip authorizes and authenticates the request with an diff --git a/vendor/cloud.google.com/go/auth/internal/compute/compute.go b/vendor/cloud.google.com/go/auth/internal/compute/compute.go new file mode 100644 index 00000000000..651bd61fbbc --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/compute/compute.go @@ -0,0 +1,66 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compute + +import ( + "log" + "runtime" + "strings" + "sync" +) + +var ( + vmOnGCEOnce sync.Once + vmOnGCE bool +) + +// OnComputeEngine returns whether the client is running on GCE. +// +// This is a copy of the gRPC internal googlecloud.OnGCE() func at: +// https://github.com/grpc/grpc-go/blob/master/internal/googlecloud/googlecloud.go +// The functionality is similar to the metadata.OnGCE() func at: +// https://github.com/xmenxk/google-cloud-go/blob/main/compute/metadata/metadata.go +// +// The difference is that OnComputeEngine() does not perform HTTP or DNS check on the metadata server. +// In particular, OnComputeEngine() will return false on Serverless. +func OnComputeEngine() bool { + vmOnGCEOnce.Do(func() { + mf, err := manufacturer() + if err != nil { + log.Printf("Failed to read manufacturer, vmOnGCE=false: %v", err) + return + } + vmOnGCE = isRunningOnGCE(mf, runtime.GOOS) + }) + return vmOnGCE +} + +// isRunningOnGCE checks whether the local system, without doing a network request, is +// running on GCP. +func isRunningOnGCE(manufacturer []byte, goos string) bool { + name := string(manufacturer) + switch goos { + case "linux": + name = strings.TrimSpace(name) + return name == "Google" || name == "Google Compute Engine" + case "windows": + name = strings.Replace(name, " ", "", -1) + name = strings.Replace(name, "\n", "", -1) + name = strings.Replace(name, "\r", "", -1) + return name == "Google" + default: + return false + } +} diff --git a/vendor/cloud.google.com/go/auth/internal/compute/manufacturer.go b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer.go new file mode 100644 index 00000000000..af490bf4f49 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer.go @@ -0,0 +1,22 @@ +//go:build !(linux || windows) +// +build !linux,!windows + +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compute + +func manufacturer() ([]byte, error) { + return nil, nil +} diff --git a/vendor/github.com/prometheus/prometheus/util/testutil/logging.go b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_linux.go similarity index 53% rename from vendor/github.com/prometheus/prometheus/util/testutil/logging.go rename to vendor/cloud.google.com/go/auth/internal/compute/manufacturer_linux.go index db096ea2342..d92178df86c 100644 --- a/vendor/github.com/prometheus/prometheus/util/testutil/logging.go +++ b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_linux.go @@ -1,9 +1,10 @@ -// Copyright 2019 The Prometheus Authors +// Copyright 2024 Google LLC +// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -11,25 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. -package testutil - -import ( - "testing" +package compute - "github.com/go-kit/log" -) +import "os" -type logger struct { - t *testing.T -} - -// NewLogger returns a gokit compatible Logger which calls t.Log. -func NewLogger(t *testing.T) log.Logger { - return logger{t: t} -} +const linuxProductNameFile = "/sys/class/dmi/id/product_name" -// Log implements log.Logger. -func (t logger) Log(keyvals ...interface{}) error { - t.t.Log(keyvals...) - return nil +func manufacturer() ([]byte, error) { + return os.ReadFile(linuxProductNameFile) } diff --git a/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_windows.go b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_windows.go new file mode 100644 index 00000000000..16be9df3064 --- /dev/null +++ b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_windows.go @@ -0,0 +1,46 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package compute + +import ( + "errors" + "os/exec" + "regexp" + "strings" +) + +const ( + windowsCheckCommand = "powershell.exe" + windowsCheckCommandArgs = "Get-WmiObject -Class Win32_BIOS" + powershellOutputFilter = "Manufacturer" + windowsManufacturerRegex = ":(.*)" +) + +func manufacturer() ([]byte, error) { + cmd := exec.Command(windowsCheckCommand, windowsCheckCommandArgs) + out, err := cmd.Output() + if err != nil { + return nil, err + } + for _, line := range strings.Split(strings.TrimSuffix(string(out), "\n"), "\n") { + if strings.HasPrefix(line, powershellOutputFilter) { + re := regexp.MustCompile(windowsManufacturerRegex) + name := re.FindString(line) + name = strings.TrimLeft(name, ":") + return []byte(name), nil + } + } + return nil, errors.New("cannot determine the machine's manufacturer") +} diff --git a/vendor/cloud.google.com/go/auth/internal/internal.go b/vendor/cloud.google.com/go/auth/internal/internal.go index 4308345eda3..66a51f19c73 100644 --- a/vendor/cloud.google.com/go/auth/internal/internal.go +++ b/vendor/cloud.google.com/go/auth/internal/internal.go @@ -38,8 +38,11 @@ const ( // QuotaProjectEnvVar is the environment variable for setting the quota // project. QuotaProjectEnvVar = "GOOGLE_CLOUD_QUOTA_PROJECT" - projectEnvVar = "GOOGLE_CLOUD_PROJECT" - maxBodySize = 1 << 20 + // UniverseDomainEnvVar is the environment variable for setting the default + // service domain for a given Cloud universe. + UniverseDomainEnvVar = "GOOGLE_CLOUD_UNIVERSE_DOMAIN" + projectEnvVar = "GOOGLE_CLOUD_PROJECT" + maxBodySize = 1 << 20 // DefaultUniverseDomain is the default value for universe domain. // Universe domain is the default service domain for a given Cloud universe. diff --git a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md index 9594e1e2793..da7db19b1c6 100644 --- a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md +++ b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md @@ -1,5 +1,19 @@ # Changes +## [0.5.2](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.5.1...compute/metadata/v0.5.2) (2024-09-20) + + +### Bug Fixes + +* **compute/metadata:** Close Response Body for failed request ([#10891](https://github.com/googleapis/google-cloud-go/issues/10891)) ([e91d45e](https://github.com/googleapis/google-cloud-go/commit/e91d45e4757a9e354114509ba9800085d9e0ff1f)) + +## [0.5.1](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.5.0...compute/metadata/v0.5.1) (2024-09-12) + + +### Bug Fixes + +* **compute/metadata:** Check error chain for retryable error ([#10840](https://github.com/googleapis/google-cloud-go/issues/10840)) ([2bdedef](https://github.com/googleapis/google-cloud-go/commit/2bdedeff621b223d63cebc4355fcf83bc68412cd)) + ## [0.5.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.4.0...compute/metadata/v0.5.0) (2024-07-10) diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go index 345080b7297..c160b4786bb 100644 --- a/vendor/cloud.google.com/go/compute/metadata/metadata.go +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -456,6 +456,9 @@ func (c *Client) getETag(ctx context.Context, suffix string) (value, etag string code = res.StatusCode } if delay, shouldRetry := retryer.Retry(code, reqErr); shouldRetry { + if res != nil && res.Body != nil { + res.Body.Close() + } if err := sleep(ctx, delay); err != nil { return "", "", err } diff 
--git a/vendor/cloud.google.com/go/compute/metadata/retry_linux.go b/vendor/cloud.google.com/go/compute/metadata/retry_linux.go index bb412f8917e..2e53f012300 100644 --- a/vendor/cloud.google.com/go/compute/metadata/retry_linux.go +++ b/vendor/cloud.google.com/go/compute/metadata/retry_linux.go @@ -17,10 +17,15 @@ package metadata -import "syscall" +import ( + "errors" + "syscall" +) func init() { // Initialize syscallRetryable to return true on transient socket-level // errors. These errors are specific to Linux. - syscallRetryable = func(err error) bool { return err == syscall.ECONNRESET || err == syscall.ECONNREFUSED } + syscallRetryable = func(err error) bool { + return errors.Is(err, syscall.ECONNRESET) || errors.Is(err, syscall.ECONNREFUSED) + } } diff --git a/vendor/github.com/golang/glog/glog_file.go b/vendor/github.com/golang/glog/glog_file.go index a1551dbc877..8eb8b08c600 100644 --- a/vendor/github.com/golang/glog/glog_file.go +++ b/vendor/github.com/golang/glog/glog_file.go @@ -26,7 +26,6 @@ import ( "fmt" "io" "os" - "os/user" "path/filepath" "runtime" "strings" @@ -68,9 +67,8 @@ func init() { host = shortHostname(h) } - current, err := user.Current() - if err == nil { - userName = current.Username + if u := lookupUser(); u != "" { + userName = u } // Sanitize userName since it is used to construct file paths. userName = strings.Map(func(r rune) rune { diff --git a/vendor/github.com/golang/glog/glog_file_nonwindows.go b/vendor/github.com/golang/glog/glog_file_nonwindows.go new file mode 100644 index 00000000000..d5cdb793c54 --- /dev/null +++ b/vendor/github.com/golang/glog/glog_file_nonwindows.go @@ -0,0 +1,12 @@ +//go:build !windows + +package glog + +import "os/user" + +func lookupUser() string { + if current, err := user.Current(); err == nil { + return current.Username + } + return "" +} diff --git a/vendor/github.com/golang/glog/glog_file_windows.go b/vendor/github.com/golang/glog/glog_file_windows.go new file mode 100644 index 00000000000..a9e4f609dfb --- /dev/null +++ b/vendor/github.com/golang/glog/glog_file_windows.go @@ -0,0 +1,30 @@ +//go:build windows + +package glog + +import ( + "syscall" +) + +// This follows the logic in the standard library's user.Current() function, except +// that it leaves out the potentially expensive calls required to look up the user's +// display name in Active Directory. +func lookupUser() string { + token, err := syscall.OpenCurrentProcessToken() + if err != nil { + return "" + } + defer token.Close() + tokenUser, err := token.GetTokenUser() + if err != nil { + return "" + } + username, _, accountType, err := tokenUser.User.Sid.LookupAccount("") + if err != nil { + return "" + } + if accountType != syscall.SidTypeUser { + return "" + } + return username +} diff --git a/vendor/github.com/mdlayher/socket/CHANGELOG.md b/vendor/github.com/mdlayher/socket/CHANGELOG.md new file mode 100644 index 00000000000..f0d01641a26 --- /dev/null +++ b/vendor/github.com/mdlayher/socket/CHANGELOG.md @@ -0,0 +1,80 @@ +# CHANGELOG + +## v0.4.1 + +- [Bug Fix] [commit](https://github.com/mdlayher/socket/commit/2a14ceef4da279de1f957c5761fffcc6c87bbd3b): + ensure `socket.Conn` can be used with non-socket file descriptors by handling + `ENOTSOCK` in the constructor. + +## v0.4.0 + +**This is the first release of package socket that only supports Go 1.18+. +Users on older versions of Go must use v0.3.0.** + +- [Improvement]: drop support for older versions of Go so we can begin using + modern versions of `x/sys` and other dependencies. 
+ +## v0.3.0 + +**This is the last release of package socket that supports Go 1.17 and below.** + +- [New API/API change] [PR](https://github.com/mdlayher/socket/pull/8): + numerous `socket.Conn` methods now support context cancelation. Future + releases will continue adding support as needed. + - New `ReadContext` and `WriteContext` methods. + - `Connect`, `Recvfrom`, `Recvmsg`, `Sendmsg`, and `Sendto` methods now accept + a context. + - `Sendto` parameter order was also fixed to match the underlying syscall. + +## v0.2.3 + +- [New API] [commit](https://github.com/mdlayher/socket/commit/a425d96e0f772c053164f8ce4c9c825380a98086): + `socket.Conn` has new `Pidfd*` methods for wrapping the `pidfd_*(2)` family of + system calls. + +## v0.2.2 + +- [New API] [commit](https://github.com/mdlayher/socket/commit/a2429f1dfe8ec2586df5a09f50ead865276cd027): + `socket.Conn` has new `IoctlKCM*` methods for wrapping `ioctl(2)` for `AF_KCM` + operations. + +## v0.2.1 + +- [New API] [commit](https://github.com/mdlayher/socket/commit/b18ddbe9caa0e34552b4409a3aa311cb460d2f99): + `socket.Conn` has a new `SetsockoptPacketMreq` method for wrapping + `setsockopt(2)` for `AF_PACKET` socket options. + +## v0.2.0 + +- [New API] [commit](https://github.com/mdlayher/socket/commit/6e912a68523c45e5fd899239f4b46c402dd856da): + `socket.FileConn` can be used to create a `socket.Conn` from an existing + `os.File`, which may be provided by systemd socket activation or another + external mechanism. +- [API change] [commit](https://github.com/mdlayher/socket/commit/66d61f565188c23fe02b24099ddc856d538bf1a7): + `socket.Conn.Connect` now returns the `unix.Sockaddr` value provided by + `getpeername(2)`, since we have to invoke that system call anyway to verify + that a connection to a remote peer was successfully established. +- [Bug Fix] [commit](https://github.com/mdlayher/socket/commit/b60b2dbe0ac3caff2338446a150083bde8c5c19c): + check the correct error from `unix.GetsockoptInt` in the `socket.Conn.Connect` + method. Thanks @vcabbage! + +## v0.1.2 + +- [Bug Fix]: `socket.Conn.Connect` now properly checks the `SO_ERROR` socket + option value after calling `connect(2)` to verify whether or not a connection + could successfully be established. This means that `Connect` should now report + an error for an `AF_INET` TCP connection refused or `AF_VSOCK` connection + reset by peer. +- [New API]: add `socket.Conn.Getpeername` for use in `Connect`, but also for + use by external callers. + +## v0.1.1 + +- [New API]: `socket.Conn` now has `CloseRead`, `CloseWrite`, and `Shutdown` + methods. +- [Improvement]: internal rework to more robustly handle various errors. + +## v0.1.0 + +- Initial unstable release. Most functionality has been developed and ported +from package [`netlink`](https://github.com/mdlayher/netlink). 
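
As a side note for reviewers (illustrative only, not part of the vendored patch): the context-cancelation support described in the v0.3.0 changelog entry above, together with the `Socket` and `ReadContext` APIs vendored later in this change, can be sketched roughly as follows. The `AF_INET`/`SOCK_DGRAM` choice and the `"example"` socket name are arbitrary assumptions for the sake of the sketch.

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/mdlayher/socket"
	"golang.org/x/sys/unix"
)

func main() {
	// Create a datagram socket via socket.Socket; the trailing *Config may be
	// nil when no network-namespace handling is needed.
	c, err := socket.Socket(unix.AF_INET, unix.SOCK_DGRAM, 0, "example", nil)
	if err != nil {
		log.Fatalf("failed to open socket: %v", err)
	}
	defer c.Close()

	// ReadContext unblocks when the context is canceled or its deadline
	// passes, per the v0.3.0 changelog entry above.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	buf := make([]byte, 1024)
	if _, err := c.ReadContext(ctx, buf); err != nil {
		log.Printf("read did not complete: %v", err)
	}
}
```

Note that the package README (vendored below) advises against using `socket` directly in end-user applications; the sketch exists only to show how the context-aware calls behave.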
diff --git a/vendor/github.com/mdlayher/socket/LICENSE.md b/vendor/github.com/mdlayher/socket/LICENSE.md new file mode 100644 index 00000000000..3ccdb75b26d --- /dev/null +++ b/vendor/github.com/mdlayher/socket/LICENSE.md @@ -0,0 +1,9 @@ +# MIT License + +Copyright (C) 2021 Matt Layher + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/mdlayher/socket/README.md b/vendor/github.com/mdlayher/socket/README.md new file mode 100644 index 00000000000..2aa065cbb7c --- /dev/null +++ b/vendor/github.com/mdlayher/socket/README.md @@ -0,0 +1,23 @@ +# socket [![Test Status](https://github.com/mdlayher/socket/workflows/Test/badge.svg)](https://github.com/mdlayher/socket/actions) [![Go Reference](https://pkg.go.dev/badge/github.com/mdlayher/socket.svg)](https://pkg.go.dev/github.com/mdlayher/socket) [![Go Report Card](https://goreportcard.com/badge/github.com/mdlayher/socket)](https://goreportcard.com/report/github.com/mdlayher/socket) + +Package `socket` provides a low-level network connection type which integrates +with Go's runtime network poller to provide asynchronous I/O and deadline +support. MIT Licensed. + +This package focuses on UNIX-like operating systems which make use of BSD +sockets system call APIs. It is meant to be used as a foundation for the +creation of operating system-specific socket packages, for socket families such +as Linux's `AF_NETLINK`, `AF_PACKET`, or `AF_VSOCK`. This package should not be +used directly in end user applications. + +Any use of package socket should be guarded by build tags, as one would also +use when importing the `syscall` or `golang.org/x/sys` packages. + +## Stability + +See the [CHANGELOG](./CHANGELOG.md) file for a description of changes between +releases. + +This package only supports the two most recent major versions of Go, mirroring +Go's own release policy. Older versions of Go may lack critical features and bug +fixes which are necessary for this package to function correctly. diff --git a/vendor/github.com/mdlayher/socket/accept.go b/vendor/github.com/mdlayher/socket/accept.go new file mode 100644 index 00000000000..47e9d897ef8 --- /dev/null +++ b/vendor/github.com/mdlayher/socket/accept.go @@ -0,0 +1,23 @@ +//go:build !dragonfly && !freebsd && !illumos && !linux +// +build !dragonfly,!freebsd,!illumos,!linux + +package socket + +import ( + "fmt" + "runtime" + + "golang.org/x/sys/unix" +) + +const sysAccept = "accept" + +// accept wraps accept(2). 
+func accept(fd, flags int) (int, unix.Sockaddr, error) { + if flags != 0 { + // These operating systems have no support for flags to accept(2). + return 0, nil, fmt.Errorf("socket: Conn.Accept flags are ineffective on %s", runtime.GOOS) + } + + return unix.Accept(fd) +} diff --git a/vendor/github.com/mdlayher/socket/accept4.go b/vendor/github.com/mdlayher/socket/accept4.go new file mode 100644 index 00000000000..e1016b2063d --- /dev/null +++ b/vendor/github.com/mdlayher/socket/accept4.go @@ -0,0 +1,15 @@ +//go:build dragonfly || freebsd || illumos || linux +// +build dragonfly freebsd illumos linux + +package socket + +import ( + "golang.org/x/sys/unix" +) + +const sysAccept = "accept4" + +// accept wraps accept4(2). +func accept(fd, flags int) (int, unix.Sockaddr, error) { + return unix.Accept4(fd, flags) +} diff --git a/vendor/github.com/mdlayher/socket/conn.go b/vendor/github.com/mdlayher/socket/conn.go new file mode 100644 index 00000000000..7b3cc7a6e71 --- /dev/null +++ b/vendor/github.com/mdlayher/socket/conn.go @@ -0,0 +1,880 @@ +package socket + +import ( + "context" + "errors" + "io" + "os" + "sync" + "sync/atomic" + "syscall" + "time" + + "golang.org/x/sys/unix" +) + +// Lock in an expected public interface for convenience. +var _ interface { + io.ReadWriteCloser + syscall.Conn + SetDeadline(t time.Time) error + SetReadDeadline(t time.Time) error + SetWriteDeadline(t time.Time) error +} = &Conn{} + +// A Conn is a low-level network connection which integrates with Go's runtime +// network poller to provide asynchronous I/O and deadline support. +// +// Many of a Conn's blocking methods support net.Conn deadlines as well as +// cancelation via context. Note that passing a context with a deadline set will +// override any of the previous deadlines set by calls to the SetDeadline family +// of methods. +type Conn struct { + // Indicates whether or not Conn.Close has been called. Must be accessed + // atomically. Atomics definitions must come first in the Conn struct. + closed uint32 + + // A unique name for the Conn which is also associated with derived file + // descriptors such as those created by accept(2). + name string + + // facts contains information we have determined about Conn to trigger + // alternate behavior in certain functions. + facts facts + + // Provides access to the underlying file registered with the runtime + // network poller, and arbitrary raw I/O calls. + fd *os.File + rc syscall.RawConn +} + +// facts contains facts about a Conn. +type facts struct { + // isStream reports whether this is a streaming descriptor, as opposed to a + // packet-based descriptor like a UDP socket. + isStream bool + + // zeroReadIsEOF reports Whether a zero byte read indicates EOF. This is + // false for a message based socket connection. + zeroReadIsEOF bool +} + +// A Config contains options for a Conn. +type Config struct { + // NetNS specifies the Linux network namespace the Conn will operate in. + // This option is unsupported on other operating systems. + // + // If set (non-zero), Conn will enter the specified network namespace and an + // error will occur in Socket if the operation fails. + // + // If not set (zero), a best-effort attempt will be made to enter the + // network namespace of the calling thread: this means that any changes made + // to the calling thread's network namespace will also be reflected in Conn. 
+ // If this operation fails (due to lack of permissions or because network + // namespaces are disabled by kernel configuration), Socket will not return + // an error, and the Conn will operate in the default network namespace of + // the process. This enables non-privileged use of Conn in applications + // which do not require elevated privileges. + // + // Entering a network namespace is a privileged operation (root or + // CAP_SYS_ADMIN are required), and most applications should leave this set + // to 0. + NetNS int +} + +// High-level methods which provide convenience over raw system calls. + +// Close closes the underlying file descriptor for the Conn, which also causes +// all in-flight I/O operations to immediately unblock and return errors. Any +// subsequent uses of Conn will result in EBADF. +func (c *Conn) Close() error { + // The caller has expressed an intent to close the socket, so immediately + // increment s.closed to force further calls to result in EBADF before also + // closing the file descriptor to unblock any outstanding operations. + // + // Because other operations simply check for s.closed != 0, we will permit + // double Close, which would increment s.closed beyond 1. + if atomic.AddUint32(&c.closed, 1) != 1 { + // Multiple Close calls. + return nil + } + + return os.NewSyscallError("close", c.fd.Close()) +} + +// CloseRead shuts down the reading side of the Conn. Most callers should just +// use Close. +func (c *Conn) CloseRead() error { return c.Shutdown(unix.SHUT_RD) } + +// CloseWrite shuts down the writing side of the Conn. Most callers should just +// use Close. +func (c *Conn) CloseWrite() error { return c.Shutdown(unix.SHUT_WR) } + +// Read reads directly from the underlying file descriptor. +func (c *Conn) Read(b []byte) (int, error) { return c.fd.Read(b) } + +// ReadContext reads from the underlying file descriptor with added support for +// context cancelation. +func (c *Conn) ReadContext(ctx context.Context, b []byte) (int, error) { + if c.facts.isStream && len(b) > maxRW { + b = b[:maxRW] + } + + n, err := readT(c, ctx, "read", func(fd int) (int, error) { + return unix.Read(fd, b) + }) + if n == 0 && err == nil && c.facts.zeroReadIsEOF { + return 0, io.EOF + } + + return n, os.NewSyscallError("read", err) +} + +// Write writes directly to the underlying file descriptor. +func (c *Conn) Write(b []byte) (int, error) { return c.fd.Write(b) } + +// WriteContext writes to the underlying file descriptor with added support for +// context cancelation. +func (c *Conn) WriteContext(ctx context.Context, b []byte) (int, error) { + var ( + n, nn int + err error + ) + + doErr := c.write(ctx, "write", func(fd int) error { + max := len(b) + if c.facts.isStream && max-nn > maxRW { + max = nn + maxRW + } + + n, err = unix.Write(fd, b[nn:max]) + if n > 0 { + nn += n + } + if nn == len(b) { + return err + } + if n == 0 && err == nil { + err = io.ErrUnexpectedEOF + return nil + } + + return err + }) + if doErr != nil { + return 0, doErr + } + + return nn, os.NewSyscallError("write", err) +} + +// SetDeadline sets both the read and write deadlines associated with the Conn. +func (c *Conn) SetDeadline(t time.Time) error { return c.fd.SetDeadline(t) } + +// SetReadDeadline sets the read deadline associated with the Conn. +func (c *Conn) SetReadDeadline(t time.Time) error { return c.fd.SetReadDeadline(t) } + +// SetWriteDeadline sets the write deadline associated with the Conn. 
+func (c *Conn) SetWriteDeadline(t time.Time) error { return c.fd.SetWriteDeadline(t) } + +// ReadBuffer gets the size of the operating system's receive buffer associated +// with the Conn. +func (c *Conn) ReadBuffer() (int, error) { + return c.GetsockoptInt(unix.SOL_SOCKET, unix.SO_RCVBUF) +} + +// WriteBuffer gets the size of the operating system's transmit buffer +// associated with the Conn. +func (c *Conn) WriteBuffer() (int, error) { + return c.GetsockoptInt(unix.SOL_SOCKET, unix.SO_SNDBUF) +} + +// SetReadBuffer sets the size of the operating system's receive buffer +// associated with the Conn. +// +// When called with elevated privileges on Linux, the SO_RCVBUFFORCE option will +// be used to override operating system limits. Otherwise SO_RCVBUF is used +// (which obeys operating system limits). +func (c *Conn) SetReadBuffer(bytes int) error { return c.setReadBuffer(bytes) } + +// SetWriteBuffer sets the size of the operating system's transmit buffer +// associated with the Conn. +// +// When called with elevated privileges on Linux, the SO_SNDBUFFORCE option will +// be used to override operating system limits. Otherwise SO_SNDBUF is used +// (which obeys operating system limits). +func (c *Conn) SetWriteBuffer(bytes int) error { return c.setWriteBuffer(bytes) } + +// SyscallConn returns a raw network connection. This implements the +// syscall.Conn interface. +// +// SyscallConn is intended for advanced use cases, such as getting and setting +// arbitrary socket options using the socket's file descriptor. If possible, +// those operations should be performed using methods on Conn instead. +// +// Once invoked, it is the caller's responsibility to ensure that operations +// performed using Conn and the syscall.RawConn do not conflict with each other. +func (c *Conn) SyscallConn() (syscall.RawConn, error) { + if atomic.LoadUint32(&c.closed) != 0 { + return nil, os.NewSyscallError("syscallconn", unix.EBADF) + } + + // TODO(mdlayher): mutex or similar to enforce syscall.RawConn contract of + // FD remaining valid for duration of calls? + return c.rc, nil +} + +// Socket wraps the socket(2) system call to produce a Conn. domain, typ, and +// proto are passed directly to socket(2), and name should be a unique name for +// the socket type such as "netlink" or "vsock". +// +// The cfg parameter specifies optional configuration for the Conn. If nil, no +// additional configuration will be applied. +// +// If the operating system supports SOCK_CLOEXEC and SOCK_NONBLOCK, they are +// automatically applied to typ to mirror the standard library's socket flag +// behaviors. +func Socket(domain, typ, proto int, name string, cfg *Config) (*Conn, error) { + if cfg == nil { + cfg = &Config{} + } + + if cfg.NetNS == 0 { + // Non-Linux or no network namespace. + return socket(domain, typ, proto, name) + } + + // Linux only: create Conn in the specified network namespace. + return withNetNS(cfg.NetNS, func() (*Conn, error) { + return socket(domain, typ, proto, name) + }) +} + +// socket is the internal, cross-platform entry point for socket(2). +func socket(domain, typ, proto int, name string) (*Conn, error) { + var ( + fd int + err error + ) + + for { + fd, err = unix.Socket(domain, typ|socketFlags, proto) + switch { + case err == nil: + // Some OSes already set CLOEXEC with typ. + if !flagCLOEXEC { + unix.CloseOnExec(fd) + } + + // No error, prepare the Conn. + return New(fd, name) + case !ready(err): + // System call interrupted or not ready, try again. 
+ continue + case err == unix.EINVAL, err == unix.EPROTONOSUPPORT: + // On Linux, SOCK_NONBLOCK and SOCK_CLOEXEC were introduced in + // 2.6.27. On FreeBSD, both flags were introduced in FreeBSD 10. + // EINVAL and EPROTONOSUPPORT check for earlier versions of these + // OSes respectively. + // + // Mirror what the standard library does when creating file + // descriptors: avoid racing a fork/exec with the creation of new + // file descriptors, so that child processes do not inherit socket + // file descriptors unexpectedly. + // + // For a more thorough explanation, see similar work in the Go tree: + // func sysSocket in net/sock_cloexec.go, as well as the detailed + // comment in syscall/exec_unix.go. + syscall.ForkLock.RLock() + fd, err = unix.Socket(domain, typ, proto) + if err != nil { + syscall.ForkLock.RUnlock() + return nil, os.NewSyscallError("socket", err) + } + unix.CloseOnExec(fd) + syscall.ForkLock.RUnlock() + + return New(fd, name) + default: + // Unhandled error. + return nil, os.NewSyscallError("socket", err) + } + } +} + +// FileConn returns a copy of the network connection corresponding to the open +// file. It is the caller's responsibility to close the file when finished. +// Closing the Conn does not affect the File, and closing the File does not +// affect the Conn. +func FileConn(f *os.File, name string) (*Conn, error) { + // First we'll try to do fctnl(2) with F_DUPFD_CLOEXEC because we can dup + // the file descriptor and set the flag in one syscall. + fd, err := unix.FcntlInt(f.Fd(), unix.F_DUPFD_CLOEXEC, 0) + switch err { + case nil: + // OK, ready to set up non-blocking I/O. + return New(fd, name) + case unix.EINVAL: + // The kernel rejected our fcntl(2), fall back to separate dup(2) and + // setting close on exec. + // + // Mirror what the standard library does when creating file descriptors: + // avoid racing a fork/exec with the creation of new file descriptors, + // so that child processes do not inherit socket file descriptors + // unexpectedly. + syscall.ForkLock.RLock() + fd, err := unix.Dup(fd) + if err != nil { + syscall.ForkLock.RUnlock() + return nil, os.NewSyscallError("dup", err) + } + unix.CloseOnExec(fd) + syscall.ForkLock.RUnlock() + + return New(fd, name) + default: + // Any other errors. + return nil, os.NewSyscallError("fcntl", err) + } +} + +// New wraps an existing file descriptor to create a Conn. name should be a +// unique name for the socket type such as "netlink" or "vsock". +// +// Most callers should use Socket or FileConn to construct a Conn. New is +// intended for integrating with specific system calls which provide a file +// descriptor that supports asynchronous I/O. The file descriptor is immediately +// set to nonblocking mode and registered with Go's runtime network poller for +// future I/O operations. +// +// Unlike FileConn, New does not duplicate the existing file descriptor in any +// way. The returned Conn takes ownership of the underlying file descriptor. +func New(fd int, name string) (*Conn, error) { + // All Conn I/O is nonblocking for integration with Go's runtime network + // poller. Depending on the OS this might already be set but it can't hurt + // to set it again. + if err := unix.SetNonblock(fd, true); err != nil { + return nil, os.NewSyscallError("setnonblock", err) + } + + // os.NewFile registers the non-blocking file descriptor with the runtime + // poller, which is then used for most subsequent operations except those + // that require raw I/O via SyscallConn. 
+ // + // See also: https://golang.org/pkg/os/#NewFile + f := os.NewFile(uintptr(fd), name) + rc, err := f.SyscallConn() + if err != nil { + return nil, err + } + + c := &Conn{ + name: name, + fd: f, + rc: rc, + } + + // Probe the file descriptor for socket settings. + sotype, err := c.GetsockoptInt(unix.SOL_SOCKET, unix.SO_TYPE) + switch { + case err == nil: + // File is a socket, check its properties. + c.facts = facts{ + isStream: sotype == unix.SOCK_STREAM, + zeroReadIsEOF: sotype != unix.SOCK_DGRAM && sotype != unix.SOCK_RAW, + } + case errors.Is(err, unix.ENOTSOCK): + // File is not a socket, treat it as a regular file. + c.facts = facts{ + isStream: true, + zeroReadIsEOF: true, + } + default: + return nil, err + } + + return c, nil +} + +// Low-level methods which provide raw system call access. + +// Accept wraps accept(2) or accept4(2) depending on the operating system, but +// returns a Conn for the accepted connection rather than a raw file descriptor. +// +// If the operating system supports accept4(2) (which allows flags), +// SOCK_CLOEXEC and SOCK_NONBLOCK are automatically applied to flags to mirror +// the standard library's socket flag behaviors. +// +// If the operating system only supports accept(2) (which does not allow flags) +// and flags is not zero, an error will be returned. +// +// Accept obeys context cancelation and uses the deadline set on the context to +// cancel accepting the next connection. If a deadline is set on ctx, this +// deadline will override any previous deadlines set using SetDeadline or +// SetReadDeadline. Upon return, the read deadline is cleared. +func (c *Conn) Accept(ctx context.Context, flags int) (*Conn, unix.Sockaddr, error) { + type ret struct { + nfd int + sa unix.Sockaddr + } + + r, err := readT(c, ctx, sysAccept, func(fd int) (ret, error) { + // Either accept(2) or accept4(2) depending on the OS. + nfd, sa, err := accept(fd, flags|socketFlags) + return ret{nfd, sa}, err + }) + if err != nil { + // internal/poll, context error, or user function error. + return nil, nil, err + } + + // Successfully accepted a connection, wrap it in a Conn for use by the + // caller. + ac, err := New(r.nfd, c.name) + if err != nil { + return nil, nil, err + } + + return ac, r.sa, nil +} + +// Bind wraps bind(2). +func (c *Conn) Bind(sa unix.Sockaddr) error { + return c.control(context.Background(), "bind", func(fd int) error { + return unix.Bind(fd, sa) + }) +} + +// Connect wraps connect(2). In order to verify that the underlying socket is +// connected to a remote peer, Connect calls getpeername(2) and returns the +// unix.Sockaddr from that call. +// +// Connect obeys context cancelation and uses the deadline set on the context to +// cancel connecting to a remote peer. If a deadline is set on ctx, this +// deadline will override any previous deadlines set using SetDeadline or +// SetWriteDeadline. Upon return, the write deadline is cleared. +func (c *Conn) Connect(ctx context.Context, sa unix.Sockaddr) (unix.Sockaddr, error) { + const op = "connect" + + // TODO(mdlayher): it would seem that trying to connect to unbound vsock + // listeners by calling Connect multiple times results in ECONNRESET for the + // first and nil error for subsequent calls. Do we need to memoize the + // error? Check what the stdlib behavior is. + + var ( + // Track progress between invocations of the write closure. 
We don't + // have an explicit WaitWrite call like internal/poll does, so we have + // to wait until the runtime calls the closure again to indicate we can + // write. + progress uint32 + + // Capture closure sockaddr and error. + rsa unix.Sockaddr + err error + ) + + doErr := c.write(ctx, op, func(fd int) error { + if atomic.AddUint32(&progress, 1) == 1 { + // First call: initiate connect. + return unix.Connect(fd, sa) + } + + // Subsequent calls: the runtime network poller indicates fd is + // writable. Check for errno. + errno, gerr := c.GetsockoptInt(unix.SOL_SOCKET, unix.SO_ERROR) + if gerr != nil { + return gerr + } + if errno != 0 { + // Connection is still not ready or failed. If errno indicates + // the socket is not ready, we will wait for the next write + // event. Otherwise we propagate this errno back to the as a + // permanent error. + uerr := unix.Errno(errno) + err = uerr + return uerr + } + + // According to internal/poll, it's possible for the runtime network + // poller to spuriously wake us and return errno 0 for SO_ERROR. + // Make sure we are actually connected to a peer. + peer, err := c.Getpeername() + if err != nil { + // internal/poll unconditionally goes back to WaitWrite. + // Synthesize an error that will do the same for us. + return unix.EAGAIN + } + + // Connection complete. + rsa = peer + return nil + }) + if doErr != nil { + // internal/poll or context error. + return nil, doErr + } + + if err == unix.EISCONN { + // TODO(mdlayher): is this block obsolete with the addition of the + // getsockopt SO_ERROR check above? + // + // EISCONN is reported if the socket is already established and should + // not be treated as an error. + // - Darwin reports this for at least TCP sockets + // - Linux reports this for at least AF_VSOCK sockets + return rsa, nil + } + + return rsa, os.NewSyscallError(op, err) +} + +// Getsockname wraps getsockname(2). +func (c *Conn) Getsockname() (unix.Sockaddr, error) { + return controlT(c, context.Background(), "getsockname", unix.Getsockname) +} + +// Getpeername wraps getpeername(2). +func (c *Conn) Getpeername() (unix.Sockaddr, error) { + return controlT(c, context.Background(), "getpeername", unix.Getpeername) +} + +// GetsockoptInt wraps getsockopt(2) for integer values. +func (c *Conn) GetsockoptInt(level, opt int) (int, error) { + return controlT(c, context.Background(), "getsockopt", func(fd int) (int, error) { + return unix.GetsockoptInt(fd, level, opt) + }) +} + +// Listen wraps listen(2). +func (c *Conn) Listen(n int) error { + return c.control(context.Background(), "listen", func(fd int) error { + return unix.Listen(fd, n) + }) +} + +// Recvmsg wraps recvmsg(2). +func (c *Conn) Recvmsg(ctx context.Context, p, oob []byte, flags int) (int, int, int, unix.Sockaddr, error) { + type ret struct { + n, oobn, recvflags int + from unix.Sockaddr + } + + r, err := readT(c, ctx, "recvmsg", func(fd int) (ret, error) { + n, oobn, recvflags, from, err := unix.Recvmsg(fd, p, oob, flags) + return ret{n, oobn, recvflags, from}, err + }) + if r.n == 0 && err == nil && c.facts.zeroReadIsEOF { + return 0, 0, 0, nil, io.EOF + } + + return r.n, r.oobn, r.recvflags, r.from, err +} + +// Recvfrom wraps recvfrom(2). 
+func (c *Conn) Recvfrom(ctx context.Context, p []byte, flags int) (int, unix.Sockaddr, error) { + type ret struct { + n int + addr unix.Sockaddr + } + + out, err := readT(c, ctx, "recvfrom", func(fd int) (ret, error) { + n, addr, err := unix.Recvfrom(fd, p, flags) + return ret{n, addr}, err + }) + if out.n == 0 && err == nil && c.facts.zeroReadIsEOF { + return 0, nil, io.EOF + } + + return out.n, out.addr, err +} + +// Sendmsg wraps sendmsg(2). +func (c *Conn) Sendmsg(ctx context.Context, p, oob []byte, to unix.Sockaddr, flags int) (int, error) { + return writeT(c, ctx, "sendmsg", func(fd int) (int, error) { + return unix.SendmsgN(fd, p, oob, to, flags) + }) +} + +// Sendto wraps sendto(2). +func (c *Conn) Sendto(ctx context.Context, p []byte, flags int, to unix.Sockaddr) error { + return c.write(ctx, "sendto", func(fd int) error { + return unix.Sendto(fd, p, flags, to) + }) +} + +// SetsockoptInt wraps setsockopt(2) for integer values. +func (c *Conn) SetsockoptInt(level, opt, value int) error { + return c.control(context.Background(), "setsockopt", func(fd int) error { + return unix.SetsockoptInt(fd, level, opt, value) + }) +} + +// Shutdown wraps shutdown(2). +func (c *Conn) Shutdown(how int) error { + return c.control(context.Background(), "shutdown", func(fd int) error { + return unix.Shutdown(fd, how) + }) +} + +// Conn low-level read/write/control functions. These functions mirror the +// syscall.RawConn APIs but the input closures return errors rather than +// booleans. + +// read wraps readT to execute a function and capture its error result. This is +// a convenience wrapper for functions which don't return any extra values. +func (c *Conn) read(ctx context.Context, op string, f func(fd int) error) error { + _, err := readT(c, ctx, op, func(fd int) (struct{}, error) { + return struct{}{}, f(fd) + }) + return err +} + +// write executes f, a write function, against the associated file descriptor. +// op is used to create an *os.SyscallError if the file descriptor is closed. +func (c *Conn) write(ctx context.Context, op string, f func(fd int) error) error { + _, err := writeT(c, ctx, op, func(fd int) (struct{}, error) { + return struct{}{}, f(fd) + }) + return err +} + +// readT executes c.rc.Read for op using the input function, returning a newly +// allocated result T. +func readT[T any](c *Conn, ctx context.Context, op string, f func(fd int) (T, error)) (T, error) { + return rwT(c, rwContext[T]{ + Context: ctx, + Type: read, + Op: op, + Do: f, + }) +} + +// writeT executes c.rc.Write for op using the input function, returning a newly +// allocated result T. +func writeT[T any](c *Conn, ctx context.Context, op string, f func(fd int) (T, error)) (T, error) { + return rwT(c, rwContext[T]{ + Context: ctx, + Type: write, + Op: op, + Do: f, + }) +} + +// readWrite indicates if an operation intends to read or write. +type readWrite bool + +// Possible readWrite values. +const ( + read readWrite = false + write readWrite = true +) + +// An rwContext provides arguments to rwT. +type rwContext[T any] struct { + // The caller's context passed for cancelation. + Context context.Context + + // The type of an operation: read or write. + Type readWrite + + // The name of the operation used in errors. + Op string + + // The actual function to perform. + Do func(fd int) (T, error) +} + +// rwT executes c.rc.Read or c.rc.Write (depending on the value of rw.Type) for +// rw.Op using the input function, returning a newly allocated result T. 
+// +// It obeys context cancelation and the rw.Context must not be nil. +func rwT[T any](c *Conn, rw rwContext[T]) (T, error) { + if atomic.LoadUint32(&c.closed) != 0 { + // If the file descriptor is already closed, do nothing. + return *new(T), os.NewSyscallError(rw.Op, unix.EBADF) + } + + if err := rw.Context.Err(); err != nil { + // Early exit due to context cancel. + return *new(T), os.NewSyscallError(rw.Op, err) + } + + var ( + // The read or write function used to access the runtime network poller. + poll func(func(uintptr) bool) error + + // The read or write function used to set the matching deadline. + deadline func(time.Time) error + ) + + if rw.Type == write { + poll = c.rc.Write + deadline = c.SetWriteDeadline + } else { + poll = c.rc.Read + deadline = c.SetReadDeadline + } + + var ( + // Whether or not the context carried a deadline we are actively using + // for cancelation. + setDeadline bool + + // Signals for the cancelation watcher goroutine. + wg sync.WaitGroup + doneC = make(chan struct{}) + + // Atomic: reports whether we have to disarm the deadline. + // + // TODO(mdlayher): switch back to atomic.Bool when we drop support for + // Go 1.18. + needDisarm int64 + ) + + // On cancel, clean up the watcher. + defer func() { + close(doneC) + wg.Wait() + }() + + if d, ok := rw.Context.Deadline(); ok { + // The context has an explicit deadline. We will use it for cancelation + // but disarm it after poll for the next call. + if err := deadline(d); err != nil { + return *new(T), err + } + setDeadline = true + atomic.AddInt64(&needDisarm, 1) + } else { + // The context does not have an explicit deadline. We have to watch for + // cancelation so we can propagate that signal to immediately unblock + // the runtime network poller. + // + // TODO(mdlayher): is it possible to detect a background context vs a + // context with possible future cancel? + wg.Add(1) + go func() { + defer wg.Done() + + select { + case <-rw.Context.Done(): + // Cancel the operation. Make the caller disarm after poll + // returns. + atomic.AddInt64(&needDisarm, 1) + _ = deadline(time.Unix(0, 1)) + case <-doneC: + // Nothing to do. + } + }() + } + + var ( + t T + err error + ) + + pollErr := poll(func(fd uintptr) bool { + t, err = rw.Do(int(fd)) + return ready(err) + }) + + if atomic.LoadInt64(&needDisarm) > 0 { + _ = deadline(time.Time{}) + } + + if pollErr != nil { + if rw.Context.Err() != nil || (setDeadline && errors.Is(pollErr, os.ErrDeadlineExceeded)) { + // The caller canceled the operation or we set a deadline internally + // and it was reached. + // + // Unpack a plain context error. We wait for the context to be done + // to synchronize state externally. Otherwise we have noticed I/O + // timeout wakeups when we set a deadline but the context was not + // yet marked done. + <-rw.Context.Done() + return *new(T), os.NewSyscallError(rw.Op, rw.Context.Err()) + } + + // Error from syscall.RawConn methods. Conventionally the standard + // library does not wrap internal/poll errors in os.NewSyscallError. + return *new(T), pollErr + } + + // Result from user function. + return t, os.NewSyscallError(rw.Op, err) +} + +// control executes Conn.control for op using the input function. +func (c *Conn) control(ctx context.Context, op string, f func(fd int) error) error { + _, err := controlT(c, ctx, op, func(fd int) (struct{}, error) { + return struct{}{}, f(fd) + }) + return err +} + +// controlT executes c.rc.Control for op using the input function, returning a +// newly allocated result T. 
+func controlT[T any](c *Conn, ctx context.Context, op string, f func(fd int) (T, error)) (T, error) { + if atomic.LoadUint32(&c.closed) != 0 { + // If the file descriptor is already closed, do nothing. + return *new(T), os.NewSyscallError(op, unix.EBADF) + } + + var ( + t T + err error + ) + + doErr := c.rc.Control(func(fd uintptr) { + // Repeatedly attempt the syscall(s) invoked by f until completion is + // indicated by the return value of ready or the context is canceled. + // + // The last values for t and err are captured outside of the closure for + // use when the loop breaks. + for { + if err = ctx.Err(); err != nil { + // Early exit due to context cancel. + return + } + + t, err = f(int(fd)) + if ready(err) { + return + } + } + }) + if doErr != nil { + // Error from syscall.RawConn methods. Conventionally the standard + // library does not wrap internal/poll errors in os.NewSyscallError. + return *new(T), doErr + } + + // Result from user function. + return t, os.NewSyscallError(op, err) +} + +// ready indicates readiness based on the value of err. +func ready(err error) bool { + switch err { + case unix.EAGAIN, unix.EINPROGRESS, unix.EINTR: + // When a socket is in non-blocking mode, we might see a variety of errors: + // - EAGAIN: most common case for a socket read not being ready + // - EINPROGRESS: reported by some sockets when first calling connect + // - EINTR: system call interrupted, more frequently occurs in Go 1.14+ + // because goroutines can be asynchronously preempted + // + // Return false to let the poller wait for readiness. See the source code + // for internal/poll.FD.RawRead for more details. + return false + default: + // Ready regardless of whether there was an error or no error. + return true + } +} + +// Darwin and FreeBSD can't read or write 2GB+ files at a time, +// even on 64-bit systems. +// The same is true of socket implementations on many systems. +// See golang.org/issue/7812 and golang.org/issue/16266. +// Use 1GB instead of, say, 2GB-1, to keep subsequent reads aligned. +const maxRW = 1 << 30 diff --git a/vendor/github.com/mdlayher/socket/conn_linux.go b/vendor/github.com/mdlayher/socket/conn_linux.go new file mode 100644 index 00000000000..37579d4a0c7 --- /dev/null +++ b/vendor/github.com/mdlayher/socket/conn_linux.go @@ -0,0 +1,118 @@ +//go:build linux +// +build linux + +package socket + +import ( + "context" + "os" + "unsafe" + + "golang.org/x/net/bpf" + "golang.org/x/sys/unix" +) + +// IoctlKCMClone wraps ioctl(2) for unix.KCMClone values, but returns a Conn +// rather than a raw file descriptor. +func (c *Conn) IoctlKCMClone() (*Conn, error) { + info, err := controlT(c, context.Background(), "ioctl", unix.IoctlKCMClone) + if err != nil { + return nil, err + } + + // Successful clone, wrap in a Conn for use by the caller. + return New(int(info.Fd), c.name) +} + +// IoctlKCMAttach wraps ioctl(2) for unix.KCMAttach values. +func (c *Conn) IoctlKCMAttach(info unix.KCMAttach) error { + return c.control(context.Background(), "ioctl", func(fd int) error { + return unix.IoctlKCMAttach(fd, info) + }) +} + +// IoctlKCMUnattach wraps ioctl(2) for unix.KCMUnattach values. +func (c *Conn) IoctlKCMUnattach(info unix.KCMUnattach) error { + return c.control(context.Background(), "ioctl", func(fd int) error { + return unix.IoctlKCMUnattach(fd, info) + }) +} + +// PidfdGetfd wraps pidfd_getfd(2) for a Conn which wraps a pidfd, but returns a +// Conn rather than a raw file descriptor. 
+func (c *Conn) PidfdGetfd(targetFD, flags int) (*Conn, error) { + outFD, err := controlT(c, context.Background(), "pidfd_getfd", func(fd int) (int, error) { + return unix.PidfdGetfd(fd, targetFD, flags) + }) + if err != nil { + return nil, err + } + + // Successful getfd, wrap in a Conn for use by the caller. + return New(outFD, c.name) +} + +// PidfdSendSignal wraps pidfd_send_signal(2) for a Conn which wraps a Linux +// pidfd. +func (c *Conn) PidfdSendSignal(sig unix.Signal, info *unix.Siginfo, flags int) error { + return c.control(context.Background(), "pidfd_send_signal", func(fd int) error { + return unix.PidfdSendSignal(fd, sig, info, flags) + }) +} + +// SetBPF attaches an assembled BPF program to a Conn. +func (c *Conn) SetBPF(filter []bpf.RawInstruction) error { + // We can't point to the first instruction in the array if no instructions + // are present. + if len(filter) == 0 { + return os.NewSyscallError("setsockopt", unix.EINVAL) + } + + prog := unix.SockFprog{ + Len: uint16(len(filter)), + Filter: (*unix.SockFilter)(unsafe.Pointer(&filter[0])), + } + + return c.SetsockoptSockFprog(unix.SOL_SOCKET, unix.SO_ATTACH_FILTER, &prog) +} + +// RemoveBPF removes a BPF filter from a Conn. +func (c *Conn) RemoveBPF() error { + // 0 argument is ignored. + return c.SetsockoptInt(unix.SOL_SOCKET, unix.SO_DETACH_FILTER, 0) +} + +// SetsockoptPacketMreq wraps setsockopt(2) for unix.PacketMreq values. +func (c *Conn) SetsockoptPacketMreq(level, opt int, mreq *unix.PacketMreq) error { + return c.control(context.Background(), "setsockopt", func(fd int) error { + return unix.SetsockoptPacketMreq(fd, level, opt, mreq) + }) +} + +// SetsockoptSockFprog wraps setsockopt(2) for unix.SockFprog values. +func (c *Conn) SetsockoptSockFprog(level, opt int, fprog *unix.SockFprog) error { + return c.control(context.Background(), "setsockopt", func(fd int) error { + return unix.SetsockoptSockFprog(fd, level, opt, fprog) + }) +} + +// GetsockoptTpacketStats wraps getsockopt(2) for unix.TpacketStats values. +func (c *Conn) GetsockoptTpacketStats(level, name int) (*unix.TpacketStats, error) { + return controlT(c, context.Background(), "getsockopt", func(fd int) (*unix.TpacketStats, error) { + return unix.GetsockoptTpacketStats(fd, level, name) + }) +} + +// GetsockoptTpacketStatsV3 wraps getsockopt(2) for unix.TpacketStatsV3 values. +func (c *Conn) GetsockoptTpacketStatsV3(level, name int) (*unix.TpacketStatsV3, error) { + return controlT(c, context.Background(), "getsockopt", func(fd int) (*unix.TpacketStatsV3, error) { + return unix.GetsockoptTpacketStatsV3(fd, level, name) + }) +} + +// Waitid wraps waitid(2). +func (c *Conn) Waitid(idType int, info *unix.Siginfo, options int, rusage *unix.Rusage) error { + return c.read(context.Background(), "waitid", func(fd int) error { + return unix.Waitid(idType, fd, info, options, rusage) + }) +} diff --git a/vendor/github.com/mdlayher/socket/doc.go b/vendor/github.com/mdlayher/socket/doc.go new file mode 100644 index 00000000000..7d4566c90bf --- /dev/null +++ b/vendor/github.com/mdlayher/socket/doc.go @@ -0,0 +1,13 @@ +// Package socket provides a low-level network connection type which integrates +// with Go's runtime network poller to provide asynchronous I/O and deadline +// support. +// +// This package focuses on UNIX-like operating systems which make use of BSD +// sockets system call APIs. 
It is meant to be used as a foundation for the +// creation of operating system-specific socket packages, for socket families +// such as Linux's AF_NETLINK, AF_PACKET, or AF_VSOCK. This package should not +// be used directly in end user applications. +// +// Any use of package socket should be guarded by build tags, as one would also +// use when importing the syscall or golang.org/x/sys packages. +package socket diff --git a/vendor/github.com/mdlayher/socket/netns_linux.go b/vendor/github.com/mdlayher/socket/netns_linux.go new file mode 100644 index 00000000000..b29115ad1cf --- /dev/null +++ b/vendor/github.com/mdlayher/socket/netns_linux.go @@ -0,0 +1,150 @@ +//go:build linux +// +build linux + +package socket + +import ( + "errors" + "fmt" + "os" + "runtime" + + "golang.org/x/sync/errgroup" + "golang.org/x/sys/unix" +) + +// errNetNSDisabled is returned when network namespaces are unavailable on +// a given system. +var errNetNSDisabled = errors.New("socket: Linux network namespaces are not enabled on this system") + +// withNetNS invokes fn within the context of the network namespace specified by +// fd, while also managing the logic required to safely do so by manipulating +// thread-local state. +func withNetNS(fd int, fn func() (*Conn, error)) (*Conn, error) { + var ( + eg errgroup.Group + conn *Conn + ) + + eg.Go(func() error { + // Retrieve and store the calling OS thread's network namespace so the + // thread can be reassigned to it after creating a socket in another network + // namespace. + runtime.LockOSThread() + + ns, err := threadNetNS() + if err != nil { + // No thread-local manipulation, unlock. + runtime.UnlockOSThread() + return err + } + defer ns.Close() + + // Beyond this point, the thread's network namespace is poisoned. Do not + // unlock the OS thread until all network namespace manipulation completes + // to avoid returning to the caller with altered thread-local state. + + // Assign the current OS thread the goroutine is locked to to the given + // network namespace. + if err := ns.Set(fd); err != nil { + return err + } + + // Attempt Conn creation and unconditionally restore the original namespace. + c, err := fn() + if nerr := ns.Restore(); nerr != nil { + // Failed to restore original namespace. Return an error and allow the + // runtime to terminate the thread. + if err == nil { + _ = c.Close() + } + + return nerr + } + + // No more thread-local state manipulation; return the new Conn. + runtime.UnlockOSThread() + conn = c + return nil + }) + + if err := eg.Wait(); err != nil { + return nil, err + } + + return conn, nil +} + +// A netNS is a handle that can manipulate network namespaces. +// +// Operations performed on a netNS must use runtime.LockOSThread before +// manipulating any network namespaces. +type netNS struct { + // The handle to a network namespace. + f *os.File + + // Indicates if network namespaces are disabled on this system, and thus + // operations should become a no-op or return errors. + disabled bool +} + +// threadNetNS constructs a netNS using the network namespace of the calling +// thread. If the namespace is not the default namespace, runtime.LockOSThread +// should be invoked first. +func threadNetNS() (*netNS, error) { + return fileNetNS(fmt.Sprintf("/proc/self/task/%d/ns/net", unix.Gettid())) +} + +// fileNetNS opens file and creates a netNS. fileNetNS should only be called +// directly in tests. 
+func fileNetNS(file string) (*netNS, error) { + f, err := os.Open(file) + switch { + case err == nil: + return &netNS{f: f}, nil + case os.IsNotExist(err): + // Network namespaces are not enabled on this system. Use this signal + // to return errors elsewhere if the caller explicitly asks for a + // network namespace to be set. + return &netNS{disabled: true}, nil + default: + return nil, err + } +} + +// Close releases the handle to a network namespace. +func (n *netNS) Close() error { + return n.do(func() error { return n.f.Close() }) +} + +// FD returns a file descriptor which represents the network namespace. +func (n *netNS) FD() int { + if n.disabled { + // No reasonable file descriptor value in this case, so specify a + // non-existent one. + return -1 + } + + return int(n.f.Fd()) +} + +// Restore restores the original network namespace for the calling thread. +func (n *netNS) Restore() error { + return n.do(func() error { return n.Set(n.FD()) }) +} + +// Set sets a new network namespace for the current thread using fd. +func (n *netNS) Set(fd int) error { + return n.do(func() error { + return os.NewSyscallError("setns", unix.Setns(fd, unix.CLONE_NEWNET)) + }) +} + +// do runs fn if network namespaces are enabled on this system. +func (n *netNS) do(fn func() error) error { + if n.disabled { + return errNetNSDisabled + } + + return fn() +} diff --git a/vendor/github.com/mdlayher/socket/netns_others.go b/vendor/github.com/mdlayher/socket/netns_others.go new file mode 100644 index 00000000000..4cceb3d0477 --- /dev/null +++ b/vendor/github.com/mdlayher/socket/netns_others.go @@ -0,0 +1,14 @@ +//go:build !linux +// +build !linux + +package socket + +import ( + "fmt" + "runtime" +) + +// withNetNS returns an error on non-Linux systems. +func withNetNS(_ int, _ func() (*Conn, error)) (*Conn, error) { + return nil, fmt.Errorf("socket: Linux network namespace support is not available on %s", runtime.GOOS) +} diff --git a/vendor/github.com/mdlayher/socket/setbuffer_linux.go b/vendor/github.com/mdlayher/socket/setbuffer_linux.go new file mode 100644 index 00000000000..0d4aa4417c1 --- /dev/null +++ b/vendor/github.com/mdlayher/socket/setbuffer_linux.go @@ -0,0 +1,24 @@ +//go:build linux +// +build linux + +package socket + +import "golang.org/x/sys/unix" + +// setReadBuffer wraps the SO_RCVBUF{,FORCE} setsockopt(2) options. +func (c *Conn) setReadBuffer(bytes int) error { + err := c.SetsockoptInt(unix.SOL_SOCKET, unix.SO_RCVBUFFORCE, bytes) + if err != nil { + err = c.SetsockoptInt(unix.SOL_SOCKET, unix.SO_RCVBUF, bytes) + } + return err +} + +// setWriteBuffer wraps the SO_SNDBUF{,FORCE} setsockopt(2) options. +func (c *Conn) setWriteBuffer(bytes int) error { + err := c.SetsockoptInt(unix.SOL_SOCKET, unix.SO_SNDBUFFORCE, bytes) + if err != nil { + err = c.SetsockoptInt(unix.SOL_SOCKET, unix.SO_SNDBUF, bytes) + } + return err +} diff --git a/vendor/github.com/mdlayher/socket/setbuffer_others.go b/vendor/github.com/mdlayher/socket/setbuffer_others.go new file mode 100644 index 00000000000..72b36dbe312 --- /dev/null +++ b/vendor/github.com/mdlayher/socket/setbuffer_others.go @@ -0,0 +1,16 @@ +//go:build !linux +// +build !linux + +package socket + +import "golang.org/x/sys/unix" + +// setReadBuffer wraps the SO_RCVBUF setsockopt(2) option. +func (c *Conn) setReadBuffer(bytes int) error { + return c.SetsockoptInt(unix.SOL_SOCKET, unix.SO_RCVBUF, bytes) +} + +// setWriteBuffer wraps the SO_SNDBUF setsockopt(2) option. 
+func (c *Conn) setWriteBuffer(bytes int) error { + return c.SetsockoptInt(unix.SOL_SOCKET, unix.SO_SNDBUF, bytes) +} diff --git a/vendor/github.com/mdlayher/socket/typ_cloexec_nonblock.go b/vendor/github.com/mdlayher/socket/typ_cloexec_nonblock.go new file mode 100644 index 00000000000..40e834310b7 --- /dev/null +++ b/vendor/github.com/mdlayher/socket/typ_cloexec_nonblock.go @@ -0,0 +1,12 @@ +//go:build !darwin +// +build !darwin + +package socket + +import "golang.org/x/sys/unix" + +const ( + // These operating systems support CLOEXEC and NONBLOCK socket options. + flagCLOEXEC = true + socketFlags = unix.SOCK_CLOEXEC | unix.SOCK_NONBLOCK +) diff --git a/vendor/github.com/mdlayher/socket/typ_none.go b/vendor/github.com/mdlayher/socket/typ_none.go new file mode 100644 index 00000000000..9bbb1aab5fe --- /dev/null +++ b/vendor/github.com/mdlayher/socket/typ_none.go @@ -0,0 +1,11 @@ +//go:build darwin +// +build darwin + +package socket + +const ( + // These operating systems do not support CLOEXEC and NONBLOCK socket + // options. + flagCLOEXEC = false + socketFlags = 0 +) diff --git a/vendor/github.com/mdlayher/vsock/.gitignore b/vendor/github.com/mdlayher/vsock/.gitignore new file mode 100644 index 00000000000..8130d4158a6 --- /dev/null +++ b/vendor/github.com/mdlayher/vsock/.gitignore @@ -0,0 +1,4 @@ +cover.out +vsock.test +cmd/vscp/vscp +cmd/vsockhttp/vsockhttp diff --git a/vendor/github.com/mdlayher/vsock/CHANGELOG.md b/vendor/github.com/mdlayher/vsock/CHANGELOG.md new file mode 100644 index 00000000000..c64a797bc22 --- /dev/null +++ b/vendor/github.com/mdlayher/vsock/CHANGELOG.md @@ -0,0 +1,53 @@ +# CHANGELOG + +# v1.2.1 + +- [Improvement]: updated dependencies, test with Go 1.20. + +# v1.2.0 + +**This is the first release of package vsock that only supports Go 1.18+. Users +on older versions of Go must use v1.1.1.** + +- [Improvement]: drop support for older versions of Go so we can begin using + modern versions of `x/sys` and other dependencies. + +## v1.1.1 + +**This is the last release of package vsock that supports Go 1.17 and below.** + +- [Bug Fix] [commit](https://github.com/mdlayher/vsock/commit/ead86435c244d5d6baad549a6df0557ada3f4401): + fix build on non-UNIX platforms such as Windows. This is a no-op change on + Linux but provides a friendlier experience for non-Linux users. + +## v1.1.0 + +- [New API] [commit](https://github.com/mdlayher/vsock/commit/44cd82dc5f7de644436f22236b111ab97fa9a14f): + `vsock.FileListener` can be used to create a `vsock.Listener` from an existing + `os.File`, which may be provided by systemd socket activation or another + external mechanism. + +## v1.0.1 + +- [Bug Fix] [commit](https://github.com/mdlayher/vsock/commit/99a6dccdebad21d1fa5f757d228d677ccb1412dc): + upgrade `github.com/mdlayher/socket` to handle non-blocking `connect(2)` + errors (called in `vsock.Dial`) properly by checking the `SO_ERROR` socket + option. Lock in this behavior with a new test. +- [Improvement] [commit](https://github.com/mdlayher/vsock/commit/375f3bbcc363500daf367ec511638a4655471719): + downgrade the version of `golang.org/x/net` in use to support Go 1.12. We + don't need the latest version for this package. + +## v1.0.0 + +**This is the first release of package vsock that only supports Go 1.12+. +Users on older versions of Go must use an unstable release.** + +- Initial stable commit! 
+- [API change]: the `vsock.Dial` and `vsock.Listen` constructors now accept an + optional `*vsock.Config` parameter to enable future expansion in v1.x.x + without prompting further breaking API changes. Because `vsock.Config` has no + options as of this release, `nil` may be passed in all call sites to fix + existing code upon upgrading to v1.0.0. +- [New API]: the `vsock.ListenContextID` function can be used to create a + `*vsock.Listener` which is bound to an explicit context ID address, rather + than inferring one automatically as `vsock.Listen` does. diff --git a/vendor/github.com/mdlayher/vsock/LICENSE.md b/vendor/github.com/mdlayher/vsock/LICENSE.md new file mode 100644 index 00000000000..9fa6774b148 --- /dev/null +++ b/vendor/github.com/mdlayher/vsock/LICENSE.md @@ -0,0 +1,9 @@ +# MIT License + +Copyright (C) 2017-2022 Matt Layher + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/mdlayher/vsock/README.md b/vendor/github.com/mdlayher/vsock/README.md new file mode 100644 index 00000000000..b1ec4cfbe13 --- /dev/null +++ b/vendor/github.com/mdlayher/vsock/README.md @@ -0,0 +1,21 @@ +# vsock [![Test Status](https://github.com/mdlayher/vsock/workflows/Linux%20Test/badge.svg)](https://github.com/mdlayher/vsock/actions) [![Go Reference](https://pkg.go.dev/badge/github.com/mdlayher/vsock.svg)](https://pkg.go.dev/github.com/mdlayher/vsock) [![Go Report Card](https://goreportcard.com/badge/github.com/mdlayher/vsock)](https://goreportcard.com/report/github.com/mdlayher/vsock) + +Package `vsock` provides access to Linux VM sockets (`AF_VSOCK`) for +communication between a hypervisor and its virtual machines. MIT Licensed. + +For more information about VM sockets, see my blog about +[Linux VM sockets in Go](https://mdlayher.com/blog/linux-vm-sockets-in-go/) or +the [QEMU wiki page on virtio-vsock](http://wiki.qemu-project.org/Features/VirtioVsock). + +## Stability + +See the [CHANGELOG](./CHANGELOG.md) file for a description of changes between +releases. + +This package has a stable v1 API and any future breaking changes will prompt +the release of a new major version. Features and bug fixes will continue to +occur in the v1.x.x series. + +This package only supports the two most recent major versions of Go, mirroring +Go's own release policy. Older versions of Go may lack critical features and bug +fixes which are necessary for this package to function correctly. 
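For orientation, here is a minimal sketch of how the vendored vsock API is typically used, based on the Listen/Dial signatures and documentation included later in this diff. The port value and the commented Dial call are illustrative placeholders, not part of the vendored code, and the package is only functional on Linux.

```go
package main

import (
	"log"

	"github.com/mdlayher/vsock"
)

func main() {
	// Inside a VM: listen on an automatically assigned port (0). Passing a
	// nil *vsock.Config uses the defaults, as allowed since v1.0.0.
	l, err := vsock.Listen(0, nil)
	if err != nil {
		log.Fatalf("vsock listen: %v", err)
	}
	defer l.Close()
	log.Printf("listening on %s", l.Addr())

	// On the host, a client would dial the guest's context ID and port, e.g.
	//   conn, err := vsock.Dial(cid, port, nil)
	// where cid and port are placeholders for the peer's address.
}
```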
diff --git a/vendor/github.com/mdlayher/vsock/conn_linux.go b/vendor/github.com/mdlayher/vsock/conn_linux.go new file mode 100644 index 00000000000..6029d547e5f --- /dev/null +++ b/vendor/github.com/mdlayher/vsock/conn_linux.go @@ -0,0 +1,62 @@ +//go:build linux +// +build linux + +package vsock + +import ( + "context" + + "github.com/mdlayher/socket" + "golang.org/x/sys/unix" +) + +// A conn is the net.Conn implementation for connection-oriented VM sockets. +// We can use socket.Conn directly on Linux to implement all of the necessary +// methods. +type conn = socket.Conn + +// dial is the entry point for Dial on Linux. +func dial(cid, port uint32, _ *Config) (*Conn, error) { + // TODO(mdlayher): Config default nil check and initialize. Pass options to + // socket.Config where necessary. + + c, err := socket.Socket(unix.AF_VSOCK, unix.SOCK_STREAM, 0, "vsock", nil) + if err != nil { + return nil, err + } + + sa := &unix.SockaddrVM{CID: cid, Port: port} + rsa, err := c.Connect(context.Background(), sa) + if err != nil { + _ = c.Close() + return nil, err + } + + // TODO(mdlayher): getpeername(2) appears to return nil in the GitHub CI + // environment, so in the event of a nil sockaddr, fall back to the previous + // method of synthesizing the remote address. + if rsa == nil { + rsa = sa + } + + lsa, err := c.Getsockname() + if err != nil { + _ = c.Close() + return nil, err + } + + lsavm := lsa.(*unix.SockaddrVM) + rsavm := rsa.(*unix.SockaddrVM) + + return &Conn{ + c: c, + local: &Addr{ + ContextID: lsavm.CID, + Port: lsavm.Port, + }, + remote: &Addr{ + ContextID: rsavm.CID, + Port: rsavm.Port, + }, + }, nil +} diff --git a/vendor/github.com/mdlayher/vsock/doc.go b/vendor/github.com/mdlayher/vsock/doc.go new file mode 100644 index 00000000000..e158b18361b --- /dev/null +++ b/vendor/github.com/mdlayher/vsock/doc.go @@ -0,0 +1,10 @@ +// Package vsock provides access to Linux VM sockets (AF_VSOCK) for +// communication between a hypervisor and its virtual machines. +// +// The types in this package implement interfaces provided by package net and +// may be used in applications that expect a net.Listener or net.Conn. +// +// - *Addr implements net.Addr +// - *Conn implements net.Conn +// - *Listener implements net.Listener +package vsock diff --git a/vendor/github.com/mdlayher/vsock/fd_linux.go b/vendor/github.com/mdlayher/vsock/fd_linux.go new file mode 100644 index 00000000000..531e53f928e --- /dev/null +++ b/vendor/github.com/mdlayher/vsock/fd_linux.go @@ -0,0 +1,36 @@ +package vsock + +import ( + "fmt" + "os" + + "golang.org/x/sys/unix" +) + +// contextID retrieves the local context ID for this system. +func contextID() (uint32, error) { + f, err := os.Open(devVsock) + if err != nil { + return 0, err + } + defer f.Close() + + return unix.IoctlGetUint32(int(f.Fd()), unix.IOCTL_VM_SOCKETS_GET_LOCAL_CID) +} + +// isErrno determines if an error a matches UNIX error number. 
+func isErrno(err error, errno int) bool { + switch errno { + case ebadf: + return err == unix.EBADF + case enotconn: + return err == unix.ENOTCONN + default: + panicf("vsock: isErrno called with unhandled error number parameter: %d", errno) + return false + } +} + +func panicf(format string, a ...interface{}) { + panic(fmt.Sprintf(format, a...)) +} diff --git a/vendor/github.com/mdlayher/vsock/listener_linux.go b/vendor/github.com/mdlayher/vsock/listener_linux.go new file mode 100644 index 00000000000..50fa1b7a49a --- /dev/null +++ b/vendor/github.com/mdlayher/vsock/listener_linux.go @@ -0,0 +1,133 @@ +//go:build linux +// +build linux + +package vsock + +import ( + "context" + "net" + "os" + "time" + + "github.com/mdlayher/socket" + "golang.org/x/sys/unix" +) + +var _ net.Listener = &listener{} + +// A listener is the net.Listener implementation for connection-oriented +// VM sockets. +type listener struct { + c *socket.Conn + addr *Addr +} + +// Addr and Close implement the net.Listener interface for listener. +func (l *listener) Addr() net.Addr { return l.addr } +func (l *listener) Close() error { return l.c.Close() } +func (l *listener) SetDeadline(t time.Time) error { return l.c.SetDeadline(t) } + +// Accept accepts a single connection from the listener, and sets up +// a net.Conn backed by conn. +func (l *listener) Accept() (net.Conn, error) { + c, rsa, err := l.c.Accept(context.Background(), 0) + if err != nil { + return nil, err + } + + savm := rsa.(*unix.SockaddrVM) + remote := &Addr{ + ContextID: savm.CID, + Port: savm.Port, + } + + return &Conn{ + c: c, + local: l.addr, + remote: remote, + }, nil +} + +// name is the socket name passed to package socket. +const name = "vsock" + +// listen is the entry point for Listen on Linux. +func listen(cid, port uint32, _ *Config) (*Listener, error) { + // TODO(mdlayher): Config default nil check and initialize. Pass options to + // socket.Config where necessary. + + c, err := socket.Socket(unix.AF_VSOCK, unix.SOCK_STREAM, 0, name, nil) + if err != nil { + return nil, err + } + + // Be sure to close the Conn if any of the system calls fail before we + // return the Conn to the caller. + + if port == 0 { + port = unix.VMADDR_PORT_ANY + } + + if err := c.Bind(&unix.SockaddrVM{CID: cid, Port: port}); err != nil { + _ = c.Close() + return nil, err + } + + if err := c.Listen(unix.SOMAXCONN); err != nil { + _ = c.Close() + return nil, err + } + + l, err := newListener(c) + if err != nil { + _ = c.Close() + return nil, err + } + + return l, nil +} + +// fileListener is the entry point for FileListener on Linux. +func fileListener(f *os.File) (*Listener, error) { + c, err := socket.FileConn(f, name) + if err != nil { + return nil, err + } + + l, err := newListener(c) + if err != nil { + _ = c.Close() + return nil, err + } + + return l, nil +} + +// newListener creates a Listener from a raw socket.Conn. +func newListener(c *socket.Conn) (*Listener, error) { + lsa, err := c.Getsockname() + if err != nil { + return nil, err + } + + // Now that the library can also accept arbitrary os.Files, we have to + // verify the address family so we don't accidentally create a + // *vsock.Listener backed by TCP or some other socket type. + lsavm, ok := lsa.(*unix.SockaddrVM) + if !ok { + // All errors should wrapped with os.SyscallError. 
+ return nil, os.NewSyscallError("listen", unix.EINVAL) + } + + addr := &Addr{ + ContextID: lsavm.CID, + Port: lsavm.Port, + } + + return &Listener{ + l: &listener{ + c: c, + addr: addr, + }, + }, nil +} diff --git a/vendor/github.com/mdlayher/vsock/vsock.go b/vendor/github.com/mdlayher/vsock/vsock.go new file mode 100644 index 00000000000..78763936ae6 --- /dev/null +++ b/vendor/github.com/mdlayher/vsock/vsock.go @@ -0,0 +1,435 @@ +package vsock + +import ( + "errors" + "fmt" + "io" + "net" + "os" + "strings" + "syscall" + "time" +) + +const ( + // Hypervisor specifies that a socket should communicate with the hypervisor + // process. Note that this is _not_ the same as a socket owned by a process + // running on the hypervisor. Most users should probably use Host instead. + Hypervisor = 0x0 + + // Local specifies that a socket should communicate with a matching socket + // on the same machine. This provides an alternative to UNIX sockets or + // similar and may be useful in testing VM sockets applications. + Local = 0x1 + + // Host specifies that a socket should communicate with processes other than + // the hypervisor on the host machine. This is the correct choice to + // communicate with a process running on a hypervisor using a socket dialed + // from a guest. + Host = 0x2 + + // Error numbers we recognize, copied here to avoid importing x/sys/unix in + // cross-platform code. + ebadf = 9 + enotconn = 107 + + // devVsock is the location of /dev/vsock. It is exposed on both the + // hypervisor and on virtual machines. + devVsock = "/dev/vsock" + + // network is the vsock network reported in net.OpError. + network = "vsock" + + // Operation names which may be returned in net.OpError. + opAccept = "accept" + opClose = "close" + opDial = "dial" + opListen = "listen" + opRawControl = "raw-control" + opRawRead = "raw-read" + opRawWrite = "raw-write" + opRead = "read" + opSet = "set" + opSyscallConn = "syscall-conn" + opWrite = "write" +) + +// TODO(mdlayher): plumb through socket.Config.NetNS if it makes sense. + +// Config contains options for a Conn or Listener. +type Config struct{} + +// Listen opens a connection-oriented net.Listener for incoming VM sockets +// connections. The port parameter specifies the port for the Listener. Config +// specifies optional configuration for the Listener. If config is nil, a +// default configuration will be used. +// +// To allow the server to assign a port automatically, specify 0 for port. The +// address of the server can be retrieved using the Addr method. +// +// Listen automatically infers the appropriate context ID for this machine by +// calling ContextID and passing that value to ListenContextID. Callers with +// advanced use cases (such as using the Local context ID) may wish to use +// ListenContextID directly. +// +// When the Listener is no longer needed, Close must be called to free +// resources. +func Listen(port uint32, cfg *Config) (*Listener, error) { + cid, err := ContextID() + if err != nil { + // No addresses available. + return nil, opError(opListen, err, nil, nil) + } + + return ListenContextID(cid, port, cfg) +} + +// ListenContextID is the same as Listen, but also accepts an explicit context +// ID parameter. This function is intended for advanced use cases and most +// callers should use Listen instead. +// +// See the documentation of Listen for more details. 
+func ListenContextID(contextID, port uint32, cfg *Config) (*Listener, error) { + l, err := listen(contextID, port, cfg) + if err != nil { + // No remote address available. + return nil, opError(opListen, err, &Addr{ + ContextID: contextID, + Port: port, + }, nil) + } + + return l, nil +} + +// FileListener returns a copy of the network listener corresponding to an open +// os.File. It is the caller's responsibility to close the Listener when +// finished. Closing the Listener does not affect the os.File, and closing the +// os.File does not affect the Listener. +// +// This function is intended for advanced use cases and most callers should use +// Listen instead. +func FileListener(f *os.File) (*Listener, error) { + l, err := fileListener(f) + if err != nil { + // No addresses available. + return nil, opError(opListen, err, nil, nil) + } + + return l, nil +} + +var _ net.Listener = &Listener{} + +// A Listener is a VM sockets implementation of a net.Listener. +type Listener struct { + l *listener +} + +// Accept implements the Accept method in the net.Listener interface; it waits +// for the next call and returns a generic net.Conn. The returned net.Conn will +// always be of type *Conn. +func (l *Listener) Accept() (net.Conn, error) { + c, err := l.l.Accept() + if err != nil { + return nil, l.opError(opAccept, err) + } + + return c, nil +} + +// Addr returns the listener's network address, a *Addr. The Addr returned is +// shared by all invocations of Addr, so do not modify it. +func (l *Listener) Addr() net.Addr { return l.l.Addr() } + +// Close stops listening on the VM sockets address. Already Accepted connections +// are not closed. +func (l *Listener) Close() error { + return l.opError(opClose, l.l.Close()) +} + +// SetDeadline sets the deadline associated with the listener. A zero time value +// disables the deadline. +func (l *Listener) SetDeadline(t time.Time) error { + return l.opError(opSet, l.l.SetDeadline(t)) +} + +// opError is a convenience for the function opError that also passes the local +// address of the Listener. +func (l *Listener) opError(op string, err error) error { + // No remote address for a Listener. + return opError(op, err, l.Addr(), nil) +} + +// Dial dials a connection-oriented net.Conn to a VM sockets listener. The +// context ID and port parameters specify the address of the listener. Config +// specifies optional configuration for the Conn. If config is nil, a default +// configuration will be used. +// +// If dialing a connection from the hypervisor to a virtual machine, the VM's +// context ID should be specified. +// +// If dialing from a VM to the hypervisor, Hypervisor should be used to +// communicate with the hypervisor process, or Host should be used to +// communicate with other processes on the host machine. +// +// When the connection is no longer needed, Close must be called to free +// resources. +func Dial(contextID, port uint32, cfg *Config) (*Conn, error) { + c, err := dial(contextID, port, cfg) + if err != nil { + // No local address, but we have a remote address we can return. + return nil, opError(opDial, err, nil, &Addr{ + ContextID: contextID, + Port: port, + }) + } + + return c, nil +} + +var ( + _ net.Conn = &Conn{} + _ syscall.Conn = &Conn{} +) + +// A Conn is a VM sockets implementation of a net.Conn. +type Conn struct { + c *conn + local *Addr + remote *Addr +} + +// Close closes the connection. 
+func (c *Conn) Close() error { + return c.opError(opClose, c.c.Close()) +} + +// CloseRead shuts down the reading side of the VM sockets connection. Most +// callers should just use Close. +func (c *Conn) CloseRead() error { + return c.opError(opClose, c.c.CloseRead()) +} + +// CloseWrite shuts down the writing side of the VM sockets connection. Most +// callers should just use Close. +func (c *Conn) CloseWrite() error { + return c.opError(opClose, c.c.CloseWrite()) +} + +// LocalAddr returns the local network address. The Addr returned is shared by +// all invocations of LocalAddr, so do not modify it. +func (c *Conn) LocalAddr() net.Addr { return c.local } + +// RemoteAddr returns the remote network address. The Addr returned is shared by +// all invocations of RemoteAddr, so do not modify it. +func (c *Conn) RemoteAddr() net.Addr { return c.remote } + +// Read implements the net.Conn Read method. +func (c *Conn) Read(b []byte) (int, error) { + n, err := c.c.Read(b) + if err != nil { + return n, c.opError(opRead, err) + } + + return n, nil +} + +// Write implements the net.Conn Write method. +func (c *Conn) Write(b []byte) (int, error) { + n, err := c.c.Write(b) + if err != nil { + return n, c.opError(opWrite, err) + } + + return n, nil +} + +// SetDeadline implements the net.Conn SetDeadline method. +func (c *Conn) SetDeadline(t time.Time) error { + return c.opError(opSet, c.c.SetDeadline(t)) +} + +// SetReadDeadline implements the net.Conn SetReadDeadline method. +func (c *Conn) SetReadDeadline(t time.Time) error { + return c.opError(opSet, c.c.SetReadDeadline(t)) +} + +// SetWriteDeadline implements the net.Conn SetWriteDeadline method. +func (c *Conn) SetWriteDeadline(t time.Time) error { + return c.opError(opSet, c.c.SetWriteDeadline(t)) +} + +// SyscallConn returns a raw network connection. This implements the +// syscall.Conn interface. +func (c *Conn) SyscallConn() (syscall.RawConn, error) { + rc, err := c.c.SyscallConn() + if err != nil { + return nil, c.opError(opSyscallConn, err) + } + + return &rawConn{ + rc: rc, + local: c.local, + remote: c.remote, + }, nil +} + +// opError is a convenience for the function opError that also passes the local +// and remote addresses of the Conn. +func (c *Conn) opError(op string, err error) error { + return opError(op, err, c.local, c.remote) +} + +// TODO(mdlayher): see if we can port smarter net.OpError with local/remote +// address error logic into socket.Conn's SyscallConn type to avoid the need for +// this wrapper. + +var _ syscall.RawConn = &rawConn{} + +// A rawConn is a syscall.RawConn that wraps an internal syscall.RawConn in order +// to produce net.OpError error values. +type rawConn struct { + rc syscall.RawConn + local, remote *Addr +} + +// Control implements the syscall.RawConn Control method. +func (rc *rawConn) Control(fn func(fd uintptr)) error { + return rc.opError(opRawControl, rc.rc.Control(fn)) +} + +// Control implements the syscall.RawConn Read method. +func (rc *rawConn) Read(fn func(fd uintptr) (done bool)) error { + return rc.opError(opRawRead, rc.rc.Read(fn)) +} + +// Control implements the syscall.RawConn Write method. +func (rc *rawConn) Write(fn func(fd uintptr) (done bool)) error { + return rc.opError(opRawWrite, rc.rc.Write(fn)) +} + +// opError is a convenience for the function opError that also passes the local +// and remote addresses of the rawConn. 
+func (rc *rawConn) opError(op string, err error) error { + return opError(op, err, rc.local, rc.remote) +} + +var _ net.Addr = &Addr{} + +// An Addr is the address of a VM sockets endpoint. +type Addr struct { + ContextID, Port uint32 +} + +// Network returns the address's network name, "vsock". +func (a *Addr) Network() string { return network } + +// String returns a human-readable representation of Addr, and indicates if +// ContextID is meant to be used for a hypervisor, host, VM, etc. +func (a *Addr) String() string { + var host string + + switch a.ContextID { + case Hypervisor: + host = fmt.Sprintf("hypervisor(%d)", a.ContextID) + case Local: + host = fmt.Sprintf("local(%d)", a.ContextID) + case Host: + host = fmt.Sprintf("host(%d)", a.ContextID) + default: + host = fmt.Sprintf("vm(%d)", a.ContextID) + } + + return fmt.Sprintf("%s:%d", host, a.Port) +} + +// fileName returns a file name for use with os.NewFile for Addr. +func (a *Addr) fileName() string { + return fmt.Sprintf("%s:%s", a.Network(), a.String()) +} + +// ContextID retrieves the local VM sockets context ID for this system. +// ContextID can be used to directly determine if a system is capable of using +// VM sockets. +// +// If the kernel module is unavailable, access to the kernel module is denied, +// or VM sockets are unsupported on this system, it returns an error. +func ContextID() (uint32, error) { + return contextID() +} + +// opError unpacks err if possible, producing a net.OpError with the input +// parameters in order to implement net.Conn. As a convenience, opError returns +// nil if the input error is nil. +func opError(op string, err error, local, remote net.Addr) error { + if err == nil { + return nil + } + + // TODO(mdlayher): this entire function is suspect and should probably be + // looked at carefully, especially with Go 1.13+ error wrapping. + // + // Eventually this *net.OpError logic should probably be ported into + // mdlayher/socket because similar checks are necessary to comply with + // nettest.TestConn. + + // Unwrap inner errors from error types. + // + // TODO(mdlayher): errors.Cause or similar in Go 1.13. + switch xerr := err.(type) { + // os.PathError produced by os.File method calls. + case *os.PathError: + // Although we could make use of xerr.Op here, we're passing it manually + // for consistency, since some of the Conn calls we are making don't + // wrap an os.File, which would return an Op for us. + // + // As a special case, if the error is related to access to the /dev/vsock + // device, we don't unwrap it, so the caller has more context as to why + // their operation actually failed than "permission denied" or similar. + if xerr.Path != devVsock { + err = xerr.Err + } + } + + switch { + case err == io.EOF, isErrno(err, enotconn): + // We may see a literal io.EOF as happens with x/net/nettest, but + // "transport not connected" also means io.EOF in Go. + return io.EOF + case err == os.ErrClosed, isErrno(err, ebadf), strings.Contains(err.Error(), "use of closed"): + // Different operations may return different errors that all effectively + // indicate a closed file. + // + // To rectify the differences, net.TCPConn uses an error with this text + // from internal/poll for the backing file already being closed. + err = errors.New("use of closed network connection") + default: + // Nothing to do, return this directly. + } + + // Determine source and addr using the rules defined by net.OpError's + // documentation: https://golang.org/pkg/net/#OpError. 
+ var source, addr net.Addr + switch op { + case opClose, opDial, opRawRead, opRawWrite, opRead, opWrite: + if local != nil { + source = local + } + if remote != nil { + addr = remote + } + case opAccept, opListen, opRawControl, opSet, opSyscallConn: + if local != nil { + addr = local + } + } + + return &net.OpError{ + Op: op, + Net: network, + Source: source, + Addr: addr, + Err: err, + } +} diff --git a/vendor/github.com/mdlayher/vsock/vsock_others.go b/vendor/github.com/mdlayher/vsock/vsock_others.go new file mode 100644 index 00000000000..5c1e88e3982 --- /dev/null +++ b/vendor/github.com/mdlayher/vsock/vsock_others.go @@ -0,0 +1,45 @@ +//go:build !linux +// +build !linux + +package vsock + +import ( + "fmt" + "net" + "os" + "runtime" + "syscall" + "time" +) + +// errUnimplemented is returned by all functions on platforms that +// cannot make use of VM sockets. +var errUnimplemented = fmt.Errorf("vsock: not implemented on %s", runtime.GOOS) + +func fileListener(_ *os.File) (*Listener, error) { return nil, errUnimplemented } +func listen(_, _ uint32, _ *Config) (*Listener, error) { return nil, errUnimplemented } + +type listener struct{} + +func (*listener) Accept() (net.Conn, error) { return nil, errUnimplemented } +func (*listener) Addr() net.Addr { return nil } +func (*listener) Close() error { return errUnimplemented } +func (*listener) SetDeadline(_ time.Time) error { return errUnimplemented } + +func dial(_, _ uint32, _ *Config) (*Conn, error) { return nil, errUnimplemented } + +type conn struct{} + +func (*conn) Close() error { return errUnimplemented } +func (*conn) CloseRead() error { return errUnimplemented } +func (*conn) CloseWrite() error { return errUnimplemented } +func (*conn) Read(_ []byte) (int, error) { return 0, errUnimplemented } +func (*conn) Write(_ []byte) (int, error) { return 0, errUnimplemented } +func (*conn) SetDeadline(_ time.Time) error { return errUnimplemented } +func (*conn) SetReadDeadline(_ time.Time) error { return errUnimplemented } +func (*conn) SetWriteDeadline(_ time.Time) error { return errUnimplemented } +func (*conn) SyscallConn() (syscall.RawConn, error) { return nil, errUnimplemented } + +func contextID() (uint32, error) { return 0, errUnimplemented } + +func isErrno(_ error, _ int) bool { return false } diff --git a/vendor/github.com/miekg/dns/README.md b/vendor/github.com/miekg/dns/README.md index 10ddda14273..8d5a2a47893 100644 --- a/vendor/github.com/miekg/dns/README.md +++ b/vendor/github.com/miekg/dns/README.md @@ -148,6 +148,7 @@ Example programs can be found in the `github.com/miekg/exdns` repository. * 3225 - DO bit (DNSSEC OK) * 340{1,2,3} - NAPTR record * 3445 - Limiting the scope of (DNS)KEY +* 3596 - AAAA record * 3597 - Unknown RRs * 4025 - A Method for Storing IPsec Keying Material in DNS * 403{3,4,5} - DNSSEC + validation functions diff --git a/vendor/github.com/miekg/dns/edns.go b/vendor/github.com/miekg/dns/edns.go index 1b58e8f0aa9..c1bbdaae2e9 100644 --- a/vendor/github.com/miekg/dns/edns.go +++ b/vendor/github.com/miekg/dns/edns.go @@ -756,36 +756,48 @@ const ( ExtendedErrorCodeNoReachableAuthority ExtendedErrorCodeNetworkError ExtendedErrorCodeInvalidData + ExtendedErrorCodeSignatureExpiredBeforeValid + ExtendedErrorCodeTooEarly + ExtendedErrorCodeUnsupportedNSEC3IterValue + ExtendedErrorCodeUnableToConformToPolicy + ExtendedErrorCodeSynthesized + ExtendedErrorCodeInvalidQueryType ) // ExtendedErrorCodeToString maps extended error info codes to a human readable // description. 
var ExtendedErrorCodeToString = map[uint16]string{ - ExtendedErrorCodeOther: "Other", - ExtendedErrorCodeUnsupportedDNSKEYAlgorithm: "Unsupported DNSKEY Algorithm", - ExtendedErrorCodeUnsupportedDSDigestType: "Unsupported DS Digest Type", - ExtendedErrorCodeStaleAnswer: "Stale Answer", - ExtendedErrorCodeForgedAnswer: "Forged Answer", - ExtendedErrorCodeDNSSECIndeterminate: "DNSSEC Indeterminate", - ExtendedErrorCodeDNSBogus: "DNSSEC Bogus", - ExtendedErrorCodeSignatureExpired: "Signature Expired", - ExtendedErrorCodeSignatureNotYetValid: "Signature Not Yet Valid", - ExtendedErrorCodeDNSKEYMissing: "DNSKEY Missing", - ExtendedErrorCodeRRSIGsMissing: "RRSIGs Missing", - ExtendedErrorCodeNoZoneKeyBitSet: "No Zone Key Bit Set", - ExtendedErrorCodeNSECMissing: "NSEC Missing", - ExtendedErrorCodeCachedError: "Cached Error", - ExtendedErrorCodeNotReady: "Not Ready", - ExtendedErrorCodeBlocked: "Blocked", - ExtendedErrorCodeCensored: "Censored", - ExtendedErrorCodeFiltered: "Filtered", - ExtendedErrorCodeProhibited: "Prohibited", - ExtendedErrorCodeStaleNXDOMAINAnswer: "Stale NXDOMAIN Answer", - ExtendedErrorCodeNotAuthoritative: "Not Authoritative", - ExtendedErrorCodeNotSupported: "Not Supported", - ExtendedErrorCodeNoReachableAuthority: "No Reachable Authority", - ExtendedErrorCodeNetworkError: "Network Error", - ExtendedErrorCodeInvalidData: "Invalid Data", + ExtendedErrorCodeOther: "Other", + ExtendedErrorCodeUnsupportedDNSKEYAlgorithm: "Unsupported DNSKEY Algorithm", + ExtendedErrorCodeUnsupportedDSDigestType: "Unsupported DS Digest Type", + ExtendedErrorCodeStaleAnswer: "Stale Answer", + ExtendedErrorCodeForgedAnswer: "Forged Answer", + ExtendedErrorCodeDNSSECIndeterminate: "DNSSEC Indeterminate", + ExtendedErrorCodeDNSBogus: "DNSSEC Bogus", + ExtendedErrorCodeSignatureExpired: "Signature Expired", + ExtendedErrorCodeSignatureNotYetValid: "Signature Not Yet Valid", + ExtendedErrorCodeDNSKEYMissing: "DNSKEY Missing", + ExtendedErrorCodeRRSIGsMissing: "RRSIGs Missing", + ExtendedErrorCodeNoZoneKeyBitSet: "No Zone Key Bit Set", + ExtendedErrorCodeNSECMissing: "NSEC Missing", + ExtendedErrorCodeCachedError: "Cached Error", + ExtendedErrorCodeNotReady: "Not Ready", + ExtendedErrorCodeBlocked: "Blocked", + ExtendedErrorCodeCensored: "Censored", + ExtendedErrorCodeFiltered: "Filtered", + ExtendedErrorCodeProhibited: "Prohibited", + ExtendedErrorCodeStaleNXDOMAINAnswer: "Stale NXDOMAIN Answer", + ExtendedErrorCodeNotAuthoritative: "Not Authoritative", + ExtendedErrorCodeNotSupported: "Not Supported", + ExtendedErrorCodeNoReachableAuthority: "No Reachable Authority", + ExtendedErrorCodeNetworkError: "Network Error", + ExtendedErrorCodeInvalidData: "Invalid Data", + ExtendedErrorCodeSignatureExpiredBeforeValid: "Signature Expired Before Valid", + ExtendedErrorCodeTooEarly: "Too Early", + ExtendedErrorCodeUnsupportedNSEC3IterValue: "Unsupported NSEC3 Iterations Value", + ExtendedErrorCodeUnableToConformToPolicy: "Unable To Conform To Policy", + ExtendedErrorCodeSynthesized: "Synthesized", + ExtendedErrorCodeInvalidQueryType: "Invalid Query Type", } // StringToExtendedErrorCode is a map from human readable descriptions to diff --git a/vendor/github.com/miekg/dns/types.go b/vendor/github.com/miekg/dns/types.go index 8e3129cbd25..7a34c14ca0a 100644 --- a/vendor/github.com/miekg/dns/types.go +++ b/vendor/github.com/miekg/dns/types.go @@ -96,6 +96,7 @@ const ( TypeLP uint16 = 107 TypeEUI48 uint16 = 108 TypeEUI64 uint16 = 109 + TypeNXNAME uint16 = 128 TypeURI uint16 = 256 TypeCAA uint16 = 257 TypeAVC 
uint16 = 258 @@ -294,6 +295,19 @@ func (*NULL) parse(c *zlexer, origin string) *ParseError { return &ParseError{err: "NULL records do not have a presentation format"} } +// NXNAME is a meta record. See https://www.iana.org/go/draft-ietf-dnsop-compact-denial-of-existence-04 +// Reference: https://www.iana.org/assignments/dns-parameters/dns-parameters.xhtml +type NXNAME struct { + Hdr RR_Header + // Does not have any rdata +} + +func (rr *NXNAME) String() string { return rr.Hdr.String() } + +func (*NXNAME) parse(c *zlexer, origin string) *ParseError { + return &ParseError{err: "NXNAME records do not have a presentation format"} +} + // CNAME RR. See RFC 1034. type CNAME struct { Hdr RR_Header diff --git a/vendor/github.com/miekg/dns/version.go b/vendor/github.com/miekg/dns/version.go index dc34e5902be..00c8629f278 100644 --- a/vendor/github.com/miekg/dns/version.go +++ b/vendor/github.com/miekg/dns/version.go @@ -3,7 +3,7 @@ package dns import "fmt" // Version is current version of this library. -var Version = v{1, 1, 58} +var Version = v{1, 1, 62} // v holds the version of this library. type v struct { diff --git a/vendor/github.com/miekg/dns/zduplicate.go b/vendor/github.com/miekg/dns/zduplicate.go index 03029fb3ebb..330c05395f3 100644 --- a/vendor/github.com/miekg/dns/zduplicate.go +++ b/vendor/github.com/miekg/dns/zduplicate.go @@ -886,6 +886,15 @@ func (r1 *NULL) isDuplicate(_r2 RR) bool { return true } +func (r1 *NXNAME) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*NXNAME) + if !ok { + return false + } + _ = r2 + return true +} + func (r1 *NXT) isDuplicate(_r2 RR) bool { r2, ok := _r2.(*NXT) if !ok { diff --git a/vendor/github.com/miekg/dns/zmsg.go b/vendor/github.com/miekg/dns/zmsg.go index 39b3bc8102e..5a6cf4c6ad5 100644 --- a/vendor/github.com/miekg/dns/zmsg.go +++ b/vendor/github.com/miekg/dns/zmsg.go @@ -706,6 +706,10 @@ func (rr *NULL) pack(msg []byte, off int, compression compressionMap, compress b return off, nil } +func (rr *NXNAME) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + return off, nil +} + func (rr *NXT) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packDomainName(rr.NextDomain, msg, off, compression, false) if err != nil { @@ -2266,6 +2270,13 @@ func (rr *NULL) unpack(msg []byte, off int) (off1 int, err error) { return off, nil } +func (rr *NXNAME) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + return off, nil +} + func (rr *NXT) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart diff --git a/vendor/github.com/miekg/dns/ztypes.go b/vendor/github.com/miekg/dns/ztypes.go index 2c70fc44d6f..11f13ecf9c2 100644 --- a/vendor/github.com/miekg/dns/ztypes.go +++ b/vendor/github.com/miekg/dns/ztypes.go @@ -60,6 +60,7 @@ var TypeToRR = map[uint16]func() RR{ TypeNSEC3: func() RR { return new(NSEC3) }, TypeNSEC3PARAM: func() RR { return new(NSEC3PARAM) }, TypeNULL: func() RR { return new(NULL) }, + TypeNXNAME: func() RR { return new(NXNAME) }, TypeNXT: func() RR { return new(NXT) }, TypeOPENPGPKEY: func() RR { return new(OPENPGPKEY) }, TypeOPT: func() RR { return new(OPT) }, @@ -146,6 +147,7 @@ var TypeToString = map[uint16]string{ TypeNSEC3: "NSEC3", TypeNSEC3PARAM: "NSEC3PARAM", TypeNULL: "NULL", + TypeNXNAME: "NXNAME", TypeNXT: "NXT", TypeNone: "None", TypeOPENPGPKEY: "OPENPGPKEY", @@ -230,6 +232,7 @@ func (rr *NSEC) Header() *RR_Header { return &rr.Hdr } func (rr *NSEC3) Header() *RR_Header { return 
&rr.Hdr } func (rr *NSEC3PARAM) Header() *RR_Header { return &rr.Hdr } func (rr *NULL) Header() *RR_Header { return &rr.Hdr } +func (rr *NXNAME) Header() *RR_Header { return &rr.Hdr } func (rr *NXT) Header() *RR_Header { return &rr.Hdr } func (rr *OPENPGPKEY) Header() *RR_Header { return &rr.Hdr } func (rr *OPT) Header() *RR_Header { return &rr.Hdr } @@ -594,6 +597,11 @@ func (rr *NULL) len(off int, compression map[string]struct{}) int { return l } +func (rr *NXNAME) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + return l +} + func (rr *OPENPGPKEY) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) @@ -1107,6 +1115,10 @@ func (rr *NULL) copy() RR { return &NULL{rr.Hdr, rr.Data} } +func (rr *NXNAME) copy() RR { + return &NXNAME{rr.Hdr} +} + func (rr *NXT) copy() RR { return &NXT{*rr.NSEC.copy().(*NSEC)} } diff --git a/vendor/github.com/prometheus/common/model/labelset_string.go b/vendor/github.com/prometheus/common/model/labelset_string.go index 481c47b46e5..abb2c900183 100644 --- a/vendor/github.com/prometheus/common/model/labelset_string.go +++ b/vendor/github.com/prometheus/common/model/labelset_string.go @@ -11,8 +11,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build go1.21 - package model import ( diff --git a/vendor/github.com/prometheus/common/model/labelset_string_go120.go b/vendor/github.com/prometheus/common/model/labelset_string_go120.go deleted file mode 100644 index c4212685e71..00000000000 --- a/vendor/github.com/prometheus/common/model/labelset_string_go120.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2024 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !go1.21 - -package model - -import ( - "fmt" - "sort" - "strings" -) - -// String was optimized using functions not available for go 1.20 -// or lower. We keep the old implementation for compatibility with client_golang. -// Once client golang drops support for go 1.20 (scheduled for August 2024), this -// file can be removed. -func (l LabelSet) String() string { - labelNames := make([]string, 0, len(l)) - for name := range l { - labelNames = append(labelNames, string(name)) - } - sort.Strings(labelNames) - lstrs := make([]string, 0, len(l)) - for _, name := range labelNames { - lstrs = append(lstrs, fmt.Sprintf("%s=%q", name, l[LabelName(name)])) - } - return fmt.Sprintf("{%s}", strings.Join(lstrs, ", ")) -} diff --git a/vendor/github.com/prometheus/common/promslog/slog.go b/vendor/github.com/prometheus/common/promslog/slog.go new file mode 100644 index 00000000000..f77ce91eef7 --- /dev/null +++ b/vendor/github.com/prometheus/common/promslog/slog.go @@ -0,0 +1,186 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package promslog defines standardised ways to initialize the Go standard +// library's log/slog logger. +// It should typically only ever be imported by main packages. + +package promslog + +import ( + "fmt" + "io" + "log/slog" + "os" + "path/filepath" + "strconv" + "strings" +) + +type LogStyle string + +const ( + SlogStyle LogStyle = "slog" + GoKitStyle LogStyle = "go-kit" +) + +var ( + LevelFlagOptions = []string{"debug", "info", "warn", "error"} + FormatFlagOptions = []string{"logfmt", "json"} + + callerAddFunc = false + defaultWriter = os.Stderr + goKitStyleReplaceAttrFunc = func(groups []string, a slog.Attr) slog.Attr { + key := a.Key + switch key { + case slog.TimeKey: + a.Key = "ts" + + // This timestamp format differs from RFC3339Nano by using .000 instead + // of .999999999 which changes the timestamp from 9 variable to 3 fixed + // decimals (.130 instead of .130987456). + t := a.Value.Time() + a.Value = slog.StringValue(t.UTC().Format("2006-01-02T15:04:05.000Z07:00")) + case slog.SourceKey: + a.Key = "caller" + src, _ := a.Value.Any().(*slog.Source) + + switch callerAddFunc { + case true: + a.Value = slog.StringValue(filepath.Base(src.File) + "(" + filepath.Base(src.Function) + "):" + strconv.Itoa(src.Line)) + default: + a.Value = slog.StringValue(filepath.Base(src.File) + ":" + strconv.Itoa(src.Line)) + } + case slog.LevelKey: + a.Value = slog.StringValue(strings.ToLower(a.Value.String())) + default: + } + + return a + } +) + +// AllowedLevel is a settable identifier for the minimum level a log entry +// must be have. +type AllowedLevel struct { + s string + lvl *slog.LevelVar +} + +func (l *AllowedLevel) UnmarshalYAML(unmarshal func(interface{}) error) error { + var s string + type plain string + if err := unmarshal((*plain)(&s)); err != nil { + return err + } + if s == "" { + return nil + } + lo := &AllowedLevel{} + if err := lo.Set(s); err != nil { + return err + } + *l = *lo + return nil +} + +func (l *AllowedLevel) String() string { + return l.s +} + +// Set updates the value of the allowed level. +func (l *AllowedLevel) Set(s string) error { + if l.lvl == nil { + l.lvl = &slog.LevelVar{} + } + + switch s { + case "debug": + l.lvl.Set(slog.LevelDebug) + callerAddFunc = true + case "info": + l.lvl.Set(slog.LevelInfo) + callerAddFunc = false + case "warn": + l.lvl.Set(slog.LevelWarn) + callerAddFunc = false + case "error": + l.lvl.Set(slog.LevelError) + callerAddFunc = false + default: + return fmt.Errorf("unrecognized log level %s", s) + } + l.s = s + return nil +} + +// AllowedFormat is a settable identifier for the output format that the logger can have. +type AllowedFormat struct { + s string +} + +func (f *AllowedFormat) String() string { + return f.s +} + +// Set updates the value of the allowed format. 
+func (f *AllowedFormat) Set(s string) error { + switch s { + case "logfmt", "json": + f.s = s + default: + return fmt.Errorf("unrecognized log format %s", s) + } + return nil +} + +// Config is a struct containing configurable settings for the logger +type Config struct { + Level *AllowedLevel + Format *AllowedFormat + Style LogStyle + Writer io.Writer +} + +// New returns a new slog.Logger. Each logged line will be annotated +// with a timestamp. The output always goes to stderr. +func New(config *Config) *slog.Logger { + if config.Level == nil { + config.Level = &AllowedLevel{} + _ = config.Level.Set("info") + } + + if config.Writer == nil { + config.Writer = defaultWriter + } + + logHandlerOpts := &slog.HandlerOptions{ + Level: config.Level.lvl, + AddSource: true, + } + + if config.Style == GoKitStyle { + logHandlerOpts.ReplaceAttr = goKitStyleReplaceAttrFunc + } + + if config.Format != nil && config.Format.s == "json" { + return slog.New(slog.NewJSONHandler(config.Writer, logHandlerOpts)) + } + return slog.New(slog.NewTextHandler(config.Writer, logHandlerOpts)) +} + +// NewNopLogger is a convenience function to return an slog.Logger that writes +// to io.Discard. +func NewNopLogger() *slog.Logger { + return slog.New(slog.NewTextHandler(io.Discard, nil)) +} diff --git a/vendor/github.com/prometheus/exporter-toolkit/web/cache.go b/vendor/github.com/prometheus/exporter-toolkit/web/cache.go index 9425e7ac918..252928eeaa5 100644 --- a/vendor/github.com/prometheus/exporter-toolkit/web/cache.go +++ b/vendor/github.com/prometheus/exporter-toolkit/web/cache.go @@ -18,15 +18,10 @@ package web import ( weakrand "math/rand" "sync" - "time" ) var cacheSize = 100 -func init() { - weakrand.Seed(time.Now().UnixNano()) -} - type cache struct { cache map[string]bool mtx sync.Mutex diff --git a/vendor/github.com/prometheus/exporter-toolkit/web/handler.go b/vendor/github.com/prometheus/exporter-toolkit/web/handler.go index c607a163a32..51da762c957 100644 --- a/vendor/github.com/prometheus/exporter-toolkit/web/handler.go +++ b/vendor/github.com/prometheus/exporter-toolkit/web/handler.go @@ -18,11 +18,11 @@ package web import ( "encoding/hex" "fmt" + "log/slog" "net/http" "strings" "sync" - "github.com/go-kit/log" "golang.org/x/crypto/bcrypt" ) @@ -78,7 +78,7 @@ HeadersLoop: type webHandler struct { tlsConfigPath string handler http.Handler - logger log.Logger + logger *slog.Logger cache *cache // bcryptMtx is there to ensure that bcrypt.CompareHashAndPassword is run // only once in parallel as this is CPU intensive. 
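The hunks above swap exporter-toolkit's go-kit logger for the standard library's *slog.Logger, and the promslog package vendored earlier in this diff provides the corresponding constructor. A minimal sketch of its use follows; the chosen level, format, and log message are illustrative only.

```go
package main

import (
	"log"
	"os"

	"github.com/prometheus/common/promslog"
)

func main() {
	// Level and Format are optional; promslog.New falls back to info level
	// and the logfmt-style text handler when they are left nil.
	level := &promslog.AllowedLevel{}
	if err := level.Set("debug"); err != nil {
		log.Fatal(err)
	}
	format := &promslog.AllowedFormat{}
	if err := format.Set("json"); err != nil {
		log.Fatal(err)
	}

	logger := promslog.New(&promslog.Config{
		Level:  level,
		Format: format,
		Writer: os.Stderr,
	})

	// The result is a plain *slog.Logger, which is what the updated
	// exporter-toolkit handler and Serve functions now accept.
	logger.Info("Listening on", "address", "127.0.0.1:9090")
}
```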
@@ -88,7 +88,7 @@ type webHandler struct { func (u *webHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { c, err := getConfig(u.tlsConfigPath) if err != nil { - u.logger.Log("msg", "Unable to parse configuration", "err", err) + u.logger.Error("Unable to parse configuration", "err", err.Error()) http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError) return } diff --git a/vendor/github.com/prometheus/exporter-toolkit/web/landing_page.css b/vendor/github.com/prometheus/exporter-toolkit/web/landing_page.css index 260bc8a0967..0dd728a9f75 100644 --- a/vendor/github.com/prometheus/exporter-toolkit/web/landing_page.css +++ b/vendor/github.com/prometheus/exporter-toolkit/web/landing_page.css @@ -15,4 +15,9 @@ label { display: inline-block; width: {{.Form.Width}}em; } +#pprof { + border: black 2px solid; + padding: 1rem; + width: fit-content; +} {{.ExtraCSS}} diff --git a/vendor/github.com/prometheus/exporter-toolkit/web/landing_page.html b/vendor/github.com/prometheus/exporter-toolkit/web/landing_page.html index 4f2e1817729..e1ac0aecdd2 100644 --- a/vendor/github.com/prometheus/exporter-toolkit/web/landing_page.html +++ b/vendor/github.com/prometheus/exporter-toolkit/web/landing_page.html @@ -30,6 +30,14 @@

     {{.Name}}
   {{ end }}
   {{ .ExtraHTML }}
+  <div id="pprof">
+    Download a detailed report of resource usage (pprof format, from the Go runtime):
+    To visualize and share profiles you can upload to pprof.me
+  </div>
diff --git a/vendor/github.com/prometheus/exporter-toolkit/web/tls_config.go b/vendor/github.com/prometheus/exporter-toolkit/web/tls_config.go index 61383bce162..0730a938fde 100644 --- a/vendor/github.com/prometheus/exporter-toolkit/web/tls_config.go +++ b/vendor/github.com/prometheus/exporter-toolkit/web/tls_config.go @@ -18,14 +18,17 @@ import ( "crypto/x509" "errors" "fmt" + "log/slog" "net" "net/http" + "net/url" "os" "path/filepath" + "strconv" + "strings" "github.com/coreos/go-systemd/v22/activation" - "github.com/go-kit/log" - "github.com/go-kit/log/level" + "github.com/mdlayher/vsock" config_util "github.com/prometheus/common/config" "golang.org/x/sync/errgroup" "gopkg.in/yaml.v2" @@ -263,7 +266,7 @@ func ConfigToTLSConfig(c *TLSConfig) (*tls.Config, error) { // ServeMultiple starts the server on the given listeners. The FlagConfig is // also passed on to Serve. -func ServeMultiple(listeners []net.Listener, server *http.Server, flags *FlagConfig, logger log.Logger) error { +func ServeMultiple(listeners []net.Listener, server *http.Server, flags *FlagConfig, logger *slog.Logger) error { errs := new(errgroup.Group) for _, l := range listeners { l := l @@ -275,16 +278,18 @@ func ServeMultiple(listeners []net.Listener, server *http.Server, flags *FlagCon } // ListenAndServe starts the server on addresses given in WebListenAddresses in -// the FlagConfig or instead uses systemd socket activated listeners if -// WebSystemdSocket in the FlagConfig is true. The FlagConfig is also passed on -// to ServeMultiple. -func ListenAndServe(server *http.Server, flags *FlagConfig, logger log.Logger) error { +// the FlagConfig. When address starts looks like vsock://:{port}, it listens on +// vsock. More info check https://wiki.qemu.org/Features/VirtioVsock . +// Or instead uses systemd socket activated listeners if WebSystemdSocket in the +// FlagConfig is true. +// The FlagConfig is also passed on to ServeMultiple. 
+func ListenAndServe(server *http.Server, flags *FlagConfig, logger *slog.Logger) error { if flags.WebSystemdSocket == nil && (flags.WebListenAddresses == nil || len(*flags.WebListenAddresses) == 0) { return ErrNoListeners } if flags.WebSystemdSocket != nil && *flags.WebSystemdSocket { - level.Info(logger).Log("msg", "Listening on systemd activated listeners instead of port listeners.") + logger.Info("Listening on systemd activated listeners instead of port listeners.") listeners, err := activation.Listeners() if err != nil { return err @@ -297,9 +302,22 @@ func ListenAndServe(server *http.Server, flags *FlagConfig, logger log.Logger) e listeners := make([]net.Listener, 0, len(*flags.WebListenAddresses)) for _, address := range *flags.WebListenAddresses { - listener, err := net.Listen("tcp", address) - if err != nil { - return err + var err error + var listener net.Listener + if strings.HasPrefix(address, "vsock://") { + port, err := parseVsockPort(address) + if err != nil { + return err + } + listener, err = vsock.Listen(port, nil) + if err != nil { + return err + } + } else { + listener, err = net.Listen("tcp", address) + if err != nil { + return err + } } defer listener.Close() listeners = append(listeners, listener) @@ -307,13 +325,29 @@ func ListenAndServe(server *http.Server, flags *FlagConfig, logger log.Logger) e return ServeMultiple(listeners, server, flags, logger) } +func parseVsockPort(address string) (uint32, error) { + uri, err := url.Parse(address) + if err != nil { + return 0, err + } + _, portStr, err := net.SplitHostPort(uri.Host) + if err != nil { + return 0, err + } + port, err := strconv.ParseUint(portStr, 10, 32) + if err != nil { + return 0, err + } + return uint32(port), nil +} + // Server starts the server on the given listener. Based on the file path // WebConfigFile in the FlagConfig, TLS or basic auth could be enabled. -func Serve(l net.Listener, server *http.Server, flags *FlagConfig, logger log.Logger) error { - level.Info(logger).Log("msg", "Listening on", "address", l.Addr().String()) +func Serve(l net.Listener, server *http.Server, flags *FlagConfig, logger *slog.Logger) error { + logger.Info("Listening on", "address", l.Addr().String()) tlsConfigPath := *flags.WebConfigFile if tlsConfigPath == "" { - level.Info(logger).Log("msg", "TLS is disabled.", "http2", false, "address", l.Addr().String()) + logger.Info("TLS is disabled.", "http2", false, "address", l.Addr().String()) return server.Serve(l) } @@ -346,10 +380,10 @@ func Serve(l net.Listener, server *http.Server, flags *FlagConfig, logger log.Lo server.TLSNextProto = make(map[string]func(*http.Server, *tls.Conn, http.Handler)) } // Valid TLS config. - level.Info(logger).Log("msg", "TLS is enabled.", "http2", c.HTTPConfig.HTTP2, "address", l.Addr().String()) + logger.Info("TLS is enabled.", "http2", c.HTTPConfig.HTTP2, "address", l.Addr().String()) case errNoTLSConfig: // No TLS config, back to plain HTTP. - level.Info(logger).Log("msg", "TLS is disabled.", "http2", false, "address", l.Addr().String()) + logger.Info("TLS is disabled.", "http2", false, "address", l.Addr().String()) return server.Serve(l) default: // Invalid TLS config. @@ -477,6 +511,6 @@ func (tv *TLSVersion) MarshalYAML() (interface{}, error) { // tlsConfigPath, TLS or basic auth could be enabled. // // Deprecated: Use ListenAndServe instead. 
-func Listen(server *http.Server, flags *FlagConfig, logger log.Logger) error { +func Listen(server *http.Server, flags *FlagConfig, logger *slog.Logger) error { return ListenAndServe(server, flags, logger) } diff --git a/vendor/github.com/prometheus/prometheus/config/config.go b/vendor/github.com/prometheus/prometheus/config/config.go index c9e8efbf3e8..30a74e04025 100644 --- a/vendor/github.com/prometheus/prometheus/config/config.go +++ b/vendor/github.com/prometheus/prometheus/config/config.go @@ -16,6 +16,8 @@ package config import ( "errors" "fmt" + "log/slog" + "mime" "net/url" "os" "path/filepath" @@ -25,8 +27,6 @@ import ( "time" "github.com/alecthomas/units" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/grafana/regexp" "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -73,7 +73,7 @@ const ( ) // Load parses the YAML input s into a Config. -func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, error) { +func Load(s string, logger *slog.Logger) (*Config, error) { cfg := &Config{} // If the entire config body is empty the UnmarshalYAML method is // never called. We thus have to set the DefaultConfig at the entry @@ -85,10 +85,6 @@ func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, erro return nil, err } - if !expandExternalLabels { - return cfg, nil - } - b := labels.NewScratchBuilder(0) cfg.GlobalConfig.ExternalLabels.Range(func(v labels.Label) { newV := os.Expand(v.Value, func(s string) string { @@ -98,26 +94,28 @@ func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, erro if v := os.Getenv(s); v != "" { return v } - level.Warn(logger).Log("msg", "Empty environment variable", "name", s) + logger.Warn("Empty environment variable", "name", s) return "" }) if newV != v.Value { - level.Debug(logger).Log("msg", "External label replaced", "label", v.Name, "input", v.Value, "output", newV) + logger.Debug("External label replaced", "label", v.Name, "input", v.Value, "output", newV) } // Note newV can be blank. https://github.com/prometheus/prometheus/issues/11024 b.Add(v.Name, newV) }) - cfg.GlobalConfig.ExternalLabels = b.Labels() + if !b.Labels().IsEmpty() { + cfg.GlobalConfig.ExternalLabels = b.Labels() + } return cfg, nil } // LoadFile parses the given YAML file into a Config. -func LoadFile(filename string, agentMode, expandExternalLabels bool, logger log.Logger) (*Config, error) { +func LoadFile(filename string, agentMode bool, logger *slog.Logger) (*Config, error) { content, err := os.ReadFile(filename) if err != nil { return nil, err } - cfg, err := Load(string(content), expandExternalLabels, logger) + cfg, err := Load(string(content), logger) if err != nil { return nil, fmt.Errorf("parsing YAML file %s: %w", filename, err) } @@ -166,13 +164,13 @@ var ( // DefaultScrapeConfig is the default scrape configuration. DefaultScrapeConfig = ScrapeConfig{ // ScrapeTimeout, ScrapeInterval and ScrapeProtocols default to the configured globals. - ScrapeClassicHistograms: false, - MetricsPath: "/metrics", - Scheme: "http", - HonorLabels: false, - HonorTimestamps: true, - HTTPClientConfig: config.DefaultHTTPClientConfig, - EnableCompression: true, + AlwaysScrapeClassicHistograms: false, + MetricsPath: "/metrics", + Scheme: "http", + HonorLabels: false, + HonorTimestamps: true, + HTTPClientConfig: config.DefaultHTTPClientConfig, + EnableCompression: true, } // DefaultAlertmanagerConfig is the default alertmanager configuration. 
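The config.Load and config.LoadFile changes above drop the expandExternalLabels flag (environment expansion of ${VAR} references in external_labels now always happens) and take a *slog.Logger instead of a go-kit logger. A minimal sketch of the new call shape follows; the file name is a placeholder.

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/promslog"
	"github.com/prometheus/prometheus/config"
)

func main() {
	// The logger argument is now a *slog.Logger; promslog.NewNopLogger gives
	// a no-op logger for callers that do not care about config warnings.
	logger := promslog.NewNopLogger()

	// Second argument is agentMode; the old expandExternalLabels flag is gone
	// because ${VAR} references in external_labels are always expanded now.
	cfg, err := config.LoadFile("prometheus.yml", false, logger)
	if err != nil {
		fmt.Println("load failed:", err)
		return
	}

	fmt.Println(cfg.GlobalConfig.ExternalLabels)
}
```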
@@ -183,13 +181,18 @@ var ( HTTPClientConfig: config.DefaultHTTPClientConfig, } + DefaultRemoteWriteHTTPClientConfig = config.HTTPClientConfig{ + FollowRedirects: true, + EnableHTTP2: false, + } + // DefaultRemoteWriteConfig is the default remote write configuration. DefaultRemoteWriteConfig = RemoteWriteConfig{ RemoteTimeout: model.Duration(30 * time.Second), ProtobufMessage: RemoteWriteProtoMsgV1, QueueConfig: DefaultQueueConfig, MetadataConfig: DefaultMetadataConfig, - HTTPClientConfig: config.DefaultHTTPClientConfig, + HTTPClientConfig: DefaultRemoteWriteHTTPClientConfig, } // DefaultQueueConfig is the default remote queue configuration. @@ -429,6 +432,8 @@ type GlobalConfig struct { RuleQueryOffset model.Duration `yaml:"rule_query_offset,omitempty"` // File to which PromQL queries are logged. QueryLogFile string `yaml:"query_log_file,omitempty"` + // File to which scrape failures are logged. + ScrapeFailureLogFile string `yaml:"scrape_failure_log_file,omitempty"` // The labels to add to any timeseries that this Prometheus instance scrapes. ExternalLabels labels.Labels `yaml:"external_labels,omitempty"` // An uncompressed response body larger than this many bytes will cause the @@ -474,9 +479,22 @@ func (s ScrapeProtocol) Validate() error { return nil } +// HeaderMediaType returns the MIME mediaType for a particular ScrapeProtocol. +func (s ScrapeProtocol) HeaderMediaType() string { + if _, ok := ScrapeProtocolsHeaders[s]; !ok { + return "" + } + mediaType, _, err := mime.ParseMediaType(ScrapeProtocolsHeaders[s]) + if err != nil { + return "" + } + return mediaType +} + var ( PrometheusProto ScrapeProtocol = "PrometheusProto" PrometheusText0_0_4 ScrapeProtocol = "PrometheusText0.0.4" + PrometheusText1_0_0 ScrapeProtocol = "PrometheusText1.0.0" OpenMetricsText0_0_1 ScrapeProtocol = "OpenMetricsText0.0.1" OpenMetricsText1_0_0 ScrapeProtocol = "OpenMetricsText1.0.0" UTF8NamesHeader string = model.EscapingKey + "=" + model.AllowUTF8 @@ -484,6 +502,7 @@ var ( ScrapeProtocolsHeaders = map[ScrapeProtocol]string{ PrometheusProto: "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited", PrometheusText0_0_4: "text/plain;version=0.0.4", + PrometheusText1_0_0: "text/plain;version=1.0.0;escaping=allow-utf-8", OpenMetricsText0_0_1: "application/openmetrics-text;version=0.0.1", OpenMetricsText1_0_0: "application/openmetrics-text;version=1.0.0", } @@ -493,6 +512,7 @@ var ( DefaultScrapeProtocols = []ScrapeProtocol{ OpenMetricsText1_0_0, OpenMetricsText0_0_1, + PrometheusText1_0_0, PrometheusText0_0_4, } @@ -504,6 +524,7 @@ var ( PrometheusProto, OpenMetricsText1_0_0, OpenMetricsText0_0_1, + PrometheusText1_0_0, PrometheusText0_0_4, } ) @@ -529,6 +550,7 @@ func validateAcceptScrapeProtocols(sps []ScrapeProtocol) error { // SetDirectory joins any relative file paths with dir. func (c *GlobalConfig) SetDirectory(dir string) { c.QueryLogFile = config.JoinDir(dir, c.QueryLogFile) + c.ScrapeFailureLogFile = config.JoinDir(dir, c.ScrapeFailureLogFile) } // UnmarshalYAML implements the yaml.Unmarshaler interface. @@ -591,6 +613,7 @@ func (c *GlobalConfig) isZero() bool { c.EvaluationInterval == 0 && c.RuleQueryOffset == 0 && c.QueryLogFile == "" && + c.ScrapeFailureLogFile == "" && c.ScrapeProtocols == nil } @@ -628,10 +651,19 @@ type ScrapeConfig struct { // The protocols to negotiate during a scrape. It tells clients what // protocol are accepted by Prometheus and with what preference (most wanted is first). 
// Supported values (case sensitive): PrometheusProto, OpenMetricsText0.0.1, - // OpenMetricsText1.0.0, PrometheusText0.0.4. + // OpenMetricsText1.0.0, PrometheusText1.0.0, PrometheusText0.0.4. ScrapeProtocols []ScrapeProtocol `yaml:"scrape_protocols,omitempty"` - // Whether to scrape a classic histogram that is also exposed as a native histogram. - ScrapeClassicHistograms bool `yaml:"scrape_classic_histograms,omitempty"` + // The fallback protocol to use if the Content-Type provided by the target + // is not provided, blank, or not one of the expected values. + // Supported values (case sensitive): PrometheusProto, OpenMetricsText0.0.1, + // OpenMetricsText1.0.0, PrometheusText1.0.0, PrometheusText0.0.4. + ScrapeFallbackProtocol ScrapeProtocol `yaml:"fallback_scrape_protocol,omitempty"` + // Whether to scrape a classic histogram, even if it is also exposed as a native histogram. + AlwaysScrapeClassicHistograms bool `yaml:"always_scrape_classic_histograms,omitempty"` + // Whether to convert all scraped classic histograms into a native histogram with custom buckets. + ConvertClassicHistogramsToNHCB bool `yaml:"convert_classic_histograms_to_nhcb,omitempty"` + // File to which scrape failures are logged. + ScrapeFailureLogFile string `yaml:"scrape_failure_log_file,omitempty"` // The HTTP resource path on which to fetch metrics from targets. MetricsPath string `yaml:"metrics_path,omitempty"` // The URL scheme with which to fetch metrics from targets. @@ -684,6 +716,7 @@ type ScrapeConfig struct { func (c *ScrapeConfig) SetDirectory(dir string) { c.ServiceDiscoveryConfigs.SetDirectory(dir) c.HTTPClientConfig.SetDirectory(dir) + c.ScrapeFailureLogFile = config.JoinDir(dir, c.ScrapeFailureLogFile) } // UnmarshalYAML implements the yaml.Unmarshaler interface. @@ -765,6 +798,9 @@ func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error { if c.KeepDroppedTargets == 0 { c.KeepDroppedTargets = globalConfig.KeepDroppedTargets } + if c.ScrapeFailureLogFile == "" { + c.ScrapeFailureLogFile = globalConfig.ScrapeFailureLogFile + } if c.ScrapeProtocols == nil { c.ScrapeProtocols = globalConfig.ScrapeProtocols @@ -773,11 +809,17 @@ func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error { return fmt.Errorf("%w for scrape config with job name %q", err, c.JobName) } + if c.ScrapeFallbackProtocol != "" { + if err := c.ScrapeFallbackProtocol.Validate(); err != nil { + return fmt.Errorf("invalid fallback_scrape_protocol for scrape config with job name %q: %w", c.JobName, err) + } + } + switch globalConfig.MetricNameValidationScheme { - case "", LegacyValidationConfig: - case UTF8ValidationConfig: + case LegacyValidationConfig: + case "", UTF8ValidationConfig: if model.NameValidationScheme != model.UTF8Validation { - return fmt.Errorf("utf8 name validation requested but feature not enabled via --enable-feature=utf8-names") + panic("utf8 name validation requested but model.NameValidationScheme is not set to UTF8") } default: return fmt.Errorf("unknown name validation method specified, must be either 'legacy' or 'utf8', got %s", globalConfig.MetricNameValidationScheme) @@ -948,6 +990,7 @@ func (a AlertmanagerConfigs) ToMap() map[string]*AlertmanagerConfig { // AlertmanagerAPIVersion represents a version of the // github.com/prometheus/alertmanager/api, e.g. 'v1' or 'v2'. +// 'v1' is no longer supported. type AlertmanagerAPIVersion string // UnmarshalYAML implements the yaml.Unmarshaler interface. 
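The new `ScrapeProtocol.HeaderMediaType` helper above reduces a registered Accept-header value to its bare media type via `mime.ParseMediaType`. A small sketch of what that parsing yields for the newly added PrometheusText1.0.0 protocol:

```go
package main

import (
	"fmt"
	"mime"
)

func main() {
	// Header value registered for PrometheusText1.0.0 in ScrapeProtocolsHeaders.
	header := "text/plain;version=1.0.0;escaping=allow-utf-8"

	mediaType, params, err := mime.ParseMediaType(header)
	if err != nil {
		panic(err)
	}
	fmt.Println(mediaType)          // text/plain
	fmt.Println(params["version"])  // 1.0.0
	fmt.Println(params["escaping"]) // allow-utf-8
}
```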
@@ -977,7 +1020,7 @@ const ( ) var SupportedAlertmanagerAPIVersions = []AlertmanagerAPIVersion{ - AlertmanagerAPIVersionV1, AlertmanagerAPIVersionV2, + AlertmanagerAPIVersionV2, } // AlertmanagerConfig configures how Alertmanagers can be discovered and communicated with. diff --git a/vendor/github.com/prometheus/prometheus/discovery/README.md b/vendor/github.com/prometheus/prometheus/discovery/README.md index 4c066086256..d5418e7fb11 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/README.md +++ b/vendor/github.com/prometheus/prometheus/discovery/README.md @@ -233,7 +233,7 @@ type Config interface { } type DiscovererOptions struct { - Logger log.Logger + Logger *slog.Logger // A registerer for the Discoverer's metrics. Registerer prometheus.Registerer diff --git a/vendor/github.com/prometheus/prometheus/discovery/discovery.go b/vendor/github.com/prometheus/prometheus/discovery/discovery.go index a91faf6c864..c400de3632f 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/discovery.go +++ b/vendor/github.com/prometheus/prometheus/discovery/discovery.go @@ -15,9 +15,9 @@ package discovery import ( "context" + "log/slog" "reflect" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" @@ -47,7 +47,7 @@ type DiscovererMetrics interface { // DiscovererOptions provides options for a Discoverer. type DiscovererOptions struct { - Logger log.Logger + Logger *slog.Logger Metrics DiscovererMetrics @@ -109,7 +109,7 @@ func (c *Configs) SetDirectory(dir string) { // UnmarshalYAML implements yaml.Unmarshaler. func (c *Configs) UnmarshalYAML(unmarshal func(interface{}) error) error { - cfgTyp := getConfigType(configsType) + cfgTyp := reflect.StructOf(configFields) cfgPtr := reflect.New(cfgTyp) cfgVal := cfgPtr.Elem() @@ -124,7 +124,7 @@ func (c *Configs) UnmarshalYAML(unmarshal func(interface{}) error) error { // MarshalYAML implements yaml.Marshaler. func (c Configs) MarshalYAML() (interface{}, error) { - cfgTyp := getConfigType(configsType) + cfgTyp := reflect.StructOf(configFields) cfgPtr := reflect.New(cfgTyp) cfgVal := cfgPtr.Elem() diff --git a/vendor/github.com/prometheus/prometheus/discovery/manager.go b/vendor/github.com/prometheus/prometheus/discovery/manager.go index cefa90a8669..87e0ecc44b5 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/manager.go +++ b/vendor/github.com/prometheus/prometheus/discovery/manager.go @@ -16,14 +16,14 @@ package discovery import ( "context" "fmt" + "log/slog" "reflect" "sync" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/discovery/targetgroup" ) @@ -81,9 +81,9 @@ func CreateAndRegisterSDMetrics(reg prometheus.Registerer) (map[string]Discovere } // NewManager is the Discovery Manager constructor. 
-func NewManager(ctx context.Context, logger log.Logger, registerer prometheus.Registerer, sdMetrics map[string]DiscovererMetrics, options ...func(*Manager)) *Manager { +func NewManager(ctx context.Context, logger *slog.Logger, registerer prometheus.Registerer, sdMetrics map[string]DiscovererMetrics, options ...func(*Manager)) *Manager { if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } mgr := &Manager{ logger: logger, @@ -104,7 +104,7 @@ func NewManager(ctx context.Context, logger log.Logger, registerer prometheus.Re if metrics, err := NewManagerMetrics(registerer, mgr.name); err == nil { mgr.metrics = metrics } else { - level.Error(logger).Log("msg", "Failed to create discovery manager metrics", "manager", mgr.name, "err", err) + logger.Error("Failed to create discovery manager metrics", "manager", mgr.name, "err", err) return nil } @@ -141,7 +141,7 @@ func HTTPClientOptions(opts ...config.HTTPClientOption) func(*Manager) { // Manager maintains a set of discovery providers and sends each update to a map channel. // Targets are grouped by the target set name. type Manager struct { - logger log.Logger + logger *slog.Logger name string httpOpts []config.HTTPClientOption mtx sync.RWMutex @@ -294,7 +294,7 @@ func (m *Manager) StartCustomProvider(ctx context.Context, name string, worker D } func (m *Manager) startProvider(ctx context.Context, p *Provider) { - level.Debug(m.logger).Log("msg", "Starting provider", "provider", p.name, "subs", fmt.Sprintf("%v", p.subs)) + m.logger.Debug("Starting provider", "provider", p.name, "subs", fmt.Sprintf("%v", p.subs)) ctx, cancel := context.WithCancel(ctx) updates := make(chan []*targetgroup.Group) @@ -328,7 +328,7 @@ func (m *Manager) updater(ctx context.Context, p *Provider, updates chan []*targ case tgs, ok := <-updates: m.metrics.ReceivedUpdates.Inc() if !ok { - level.Debug(m.logger).Log("msg", "Discoverer channel closed", "provider", p.name) + m.logger.Debug("Discoverer channel closed", "provider", p.name) // Wait for provider cancellation to ensure targets are cleaned up when expected. 
<-ctx.Done() return @@ -364,7 +364,7 @@ func (m *Manager) sender() { case m.syncCh <- m.allGroups(): default: m.metrics.DelayedUpdates.Inc() - level.Debug(m.logger).Log("msg", "Discovery receiver's channel was full so will retry the next cycle") + m.logger.Debug("Discovery receiver's channel was full so will retry the next cycle") select { case m.triggerSend <- struct{}{}: default: @@ -458,12 +458,12 @@ func (m *Manager) registerProviders(cfgs Configs, setName string) int { } typ := cfg.Name() d, err := cfg.NewDiscoverer(DiscovererOptions{ - Logger: log.With(m.logger, "discovery", typ, "config", setName), + Logger: m.logger.With("discovery", typ, "config", setName), HTTPClientOptions: m.httpOpts, Metrics: m.sdMetrics[typ], }) if err != nil { - level.Error(m.logger).Log("msg", "Cannot create service discovery", "err", err, "type", typ, "config", setName) + m.logger.Error("Cannot create service discovery", "err", err, "type", typ, "config", setName) failed++ return } diff --git a/vendor/github.com/prometheus/prometheus/discovery/refresh/refresh.go b/vendor/github.com/prometheus/prometheus/discovery/refresh/refresh.go index f037a90cff0..31646c0e4c1 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/refresh/refresh.go +++ b/vendor/github.com/prometheus/prometheus/discovery/refresh/refresh.go @@ -16,17 +16,17 @@ package refresh import ( "context" "errors" + "log/slog" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/targetgroup" ) type Options struct { - Logger log.Logger + Logger *slog.Logger Mech string Interval time.Duration RefreshF func(ctx context.Context) ([]*targetgroup.Group, error) @@ -35,7 +35,7 @@ type Options struct { // Discovery implements the Discoverer interface. 
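Throughout these vendored packages the go-kit logger is replaced by the standard library's `log/slog` (with `promslog.NewNopLogger` as the nil fallback). A minimal sketch of the call-site migration pattern, using a plain `slog.TextHandler` rather than the `promslog` setup the real binaries wire in:

```go
package main

import (
	"log/slog"
	"os"
)

func main() {
	logger := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{
		Level: slog.LevelDebug,
	}))

	// Old style (go-kit/log):
	//   level.Debug(logger).Log("msg", "Starting provider", "provider", name)
	// New style (log/slog): the message is positional, key/value pairs follow.
	logger.Debug("Starting provider", "provider", "static/0")

	// log.With(logger, "discovery", typ, "config", setName) becomes logger.With(...).
	scoped := logger.With("discovery", "kubernetes", "config", "pods")
	scoped.Error("Cannot create service discovery", "err", os.ErrNotExist)
}
```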
type Discovery struct { - logger log.Logger + logger *slog.Logger interval time.Duration refreshf func(ctx context.Context) ([]*targetgroup.Group, error) metrics *discovery.RefreshMetrics @@ -45,9 +45,9 @@ type Discovery struct { func NewDiscovery(opts Options) *Discovery { m := opts.MetricsInstantiator.Instantiate(opts.Mech) - var logger log.Logger + var logger *slog.Logger if opts.Logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } else { logger = opts.Logger } @@ -68,7 +68,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { tgs, err := d.refresh(ctx) if err != nil { if !errors.Is(ctx.Err(), context.Canceled) { - level.Error(d.logger).Log("msg", "Unable to refresh target groups", "err", err.Error()) + d.logger.Error("Unable to refresh target groups", "err", err.Error()) } } else { select { @@ -87,7 +87,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { tgs, err := d.refresh(ctx) if err != nil { if !errors.Is(ctx.Err(), context.Canceled) { - level.Error(d.logger).Log("msg", "Unable to refresh target groups", "err", err.Error()) + d.logger.Error("Unable to refresh target groups", "err", err.Error()) } continue } diff --git a/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go b/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go index 300f3176e42..a6ad47acd3b 100644 --- a/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go +++ b/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go @@ -304,6 +304,14 @@ func (h *FloatHistogram) Div(scalar float64) *FloatHistogram { h.ZeroCount /= scalar h.Count /= scalar h.Sum /= scalar + // Division by zero removes all buckets. + if scalar == 0 { + h.PositiveBuckets = nil + h.NegativeBuckets = nil + h.PositiveSpans = nil + h.NegativeSpans = nil + return h + } for i := range h.PositiveBuckets { h.PositiveBuckets[i] /= scalar } @@ -353,7 +361,7 @@ func (h *FloatHistogram) Add(other *FloatHistogram) (*FloatHistogram, error) { default: // All other cases shouldn't actually happen. // They are a direct collision of CounterReset and NotCounterReset. - // Conservatively set the CounterResetHint to "unknown" and isse a warning. + // Conservatively set the CounterResetHint to "unknown" and issue a warning. h.CounterResetHint = UnknownCounterReset // TODO(trevorwhitney): Actually issue the warning as soon as the plumbing for it is in place } @@ -669,7 +677,7 @@ func detectReset(currIt, prevIt *floatBucketIterator) bool { if !currIt.Next() { // Reached end of currIt early, therefore // previous histogram has a bucket that the - // current one does not have. Unlass all + // current one does not have. Unless all // remaining buckets in the previous histogram // are unpopulated, this is a reset. for { @@ -902,7 +910,7 @@ func (h *FloatHistogram) trimBucketsInZeroBucket() { // reconcileZeroBuckets finds a zero bucket large enough to include the zero // buckets of both histograms (the receiving histogram and the other histogram) // with a zero threshold that is not within a populated bucket in either -// histogram. This method modifies the receiving histogram accourdingly, but +// histogram. This method modifies the receiving histogram accordingly, but // leaves the other histogram as is. Instead, it returns the zero count the // other histogram would have if it were modified. 
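Earlier in the float_histogram.go hunk, `FloatHistogram.Div` gains an explicit division-by-zero branch that drops every span and bucket instead of producing NaN bucket values. A sketch of the observable behaviour, assuming the vendored `model/histogram` package is on the import path; the field values are illustrative:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	h := &histogram.FloatHistogram{
		Schema:          0,
		Count:           10,
		Sum:             25,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 3}},
		PositiveBuckets: []float64{2, 3, 5},
	}

	h.Div(0)

	// Count and Sum are divided first, so they become +Inf; the new
	// scalar == 0 branch then removes all spans and bucket slices.
	fmt.Println(h.Count, h.Sum, len(h.PositiveSpans), len(h.PositiveBuckets))
	// +Inf +Inf 0 0
}
```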
func (h *FloatHistogram) reconcileZeroBuckets(other *FloatHistogram) float64 { diff --git a/vendor/github.com/prometheus/prometheus/model/labels/labels_common.go b/vendor/github.com/prometheus/prometheus/model/labels/labels_common.go index d7bdc1e0768..99529a38367 100644 --- a/vendor/github.com/prometheus/prometheus/model/labels/labels_common.go +++ b/vendor/github.com/prometheus/prometheus/model/labels/labels_common.go @@ -230,5 +230,5 @@ func contains(s []Label, n string) bool { } func yoloString(b []byte) string { - return *((*string)(unsafe.Pointer(&b))) + return unsafe.String(unsafe.SliceData(b), len(b)) } diff --git a/vendor/github.com/prometheus/prometheus/model/labels/labels_stringlabels.go b/vendor/github.com/prometheus/prometheus/model/labels/labels_stringlabels.go index c8bce51234a..c64bb990e02 100644 --- a/vendor/github.com/prometheus/prometheus/model/labels/labels_stringlabels.go +++ b/vendor/github.com/prometheus/prometheus/model/labels/labels_stringlabels.go @@ -16,7 +16,6 @@ package labels import ( - "reflect" "slices" "strings" "unsafe" @@ -299,10 +298,8 @@ func Equal(ls, o Labels) bool { func EmptyLabels() Labels { return Labels{} } -func yoloBytes(s string) (b []byte) { - *(*string)(unsafe.Pointer(&b)) = s - (*reflect.SliceHeader)(unsafe.Pointer(&b)).Cap = len(s) - return +func yoloBytes(s string) []byte { + return unsafe.Slice(unsafe.StringData(s), len(s)) } // New returns a sorted Labels from the given labels. @@ -338,8 +335,8 @@ func Compare(a, b Labels) int { } i := 0 // First, go 8 bytes at a time. Data strings are expected to be 8-byte aligned. - sp := unsafe.Pointer((*reflect.StringHeader)(unsafe.Pointer(&shorter)).Data) - lp := unsafe.Pointer((*reflect.StringHeader)(unsafe.Pointer(&longer)).Data) + sp := unsafe.Pointer(unsafe.StringData(shorter)) + lp := unsafe.Pointer(unsafe.StringData(longer)) for ; i < len(shorter)-8; i += 8 { if *(*uint64)(unsafe.Add(sp, i)) != *(*uint64)(unsafe.Add(lp, i)) { break diff --git a/vendor/github.com/prometheus/prometheus/model/labels/regexp.go b/vendor/github.com/prometheus/prometheus/model/labels/regexp.go index 3238190e989..0847c819c59 100644 --- a/vendor/github.com/prometheus/prometheus/model/labels/regexp.go +++ b/vendor/github.com/prometheus/prometheus/model/labels/regexp.go @@ -100,13 +100,13 @@ func newFastRegexMatcherWithoutCache(v string) (*FastRegexMatcher, error) { // available, even if the string matcher is faster. m.matchString = m.stringMatcher.Matches } else { - parsed, err := syntax.Parse(v, syntax.Perl) + parsed, err := syntax.Parse(v, syntax.Perl|syntax.DotNL) if err != nil { return nil, err } // Simplify the syntax tree to run faster. parsed = parsed.Simplify() - m.re, err = regexp.Compile("^(?:" + parsed.String() + ")$") + m.re, err = regexp.Compile("^(?s:" + parsed.String() + ")$") if err != nil { return nil, err } diff --git a/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go b/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go index a880465969a..eb79f7be21c 100644 --- a/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go +++ b/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go @@ -171,7 +171,7 @@ type Regexp struct { // NewRegexp creates a new anchored Regexp and returns an error if the // passed-in regular expression does not compile. 
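The `yoloString`/`yoloBytes` helpers in the labels package above are rewritten from the old `reflect` header tricks to the Go 1.20+ `unsafe.String`/`unsafe.Slice` APIs. A self-contained sketch of the two zero-copy conversions (with the usual caveat that the backing bytes must not be mutated while the string is in use):

```go
package main

import (
	"fmt"
	"unsafe"
)

// bytesToString reinterprets b as a string without copying.
func bytesToString(b []byte) string {
	return unsafe.String(unsafe.SliceData(b), len(b))
}

// stringToBytes reinterprets s as a byte slice without copying.
// The result must be treated as read-only.
func stringToBytes(s string) []byte {
	return unsafe.Slice(unsafe.StringData(s), len(s))
}

func main() {
	b := []byte(`series{label="value"}`)
	s := bytesToString(b)
	fmt.Println(s, len(stringToBytes(s)))
}
```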
func NewRegexp(s string) (Regexp, error) { - regex, err := regexp.Compile("^(?:" + s + ")$") + regex, err := regexp.Compile("^(?s:" + s + ")$") return Regexp{Regexp: regex}, err } @@ -218,8 +218,8 @@ func (re Regexp) String() string { } str := re.Regexp.String() - // Trim the anchor `^(?:` prefix and `)$` suffix. - return str[4 : len(str)-2] + // Trim the anchor `^(?s:` prefix and `)$` suffix. + return str[5 : len(str)-2] } // Process returns a relabeled version of the given label set. The relabel configurations @@ -277,6 +277,13 @@ func relabel(cfg *Config, lb *labels.Builder) (keep bool) { return false } case Replace: + // Fast path to add or delete label pair. + if val == "" && cfg.Regex == DefaultRelabelConfig.Regex && + !varInRegexTemplate(cfg.TargetLabel) && !varInRegexTemplate(cfg.Replacement) { + lb.Set(cfg.TargetLabel, cfg.Replacement) + break + } + indexes := cfg.Regex.FindStringSubmatchIndex(val) // If there is no match no replacement must take place. if indexes == nil { @@ -326,3 +333,7 @@ func relabel(cfg *Config, lb *labels.Builder) (keep bool) { return true } + +func varInRegexTemplate(template string) bool { + return strings.Contains(template, "$") +} diff --git a/vendor/github.com/prometheus/prometheus/model/rulefmt/rulefmt.go b/vendor/github.com/prometheus/prometheus/model/rulefmt/rulefmt.go index ec00b935401..4f6ae43f736 100644 --- a/vendor/github.com/prometheus/prometheus/model/rulefmt/rulefmt.go +++ b/vendor/github.com/prometheus/prometheus/model/rulefmt/rulefmt.go @@ -111,6 +111,20 @@ func (g *RuleGroups) Validate(node ruleGroups) (errs []error) { ) } + for k, v := range g.Labels { + if !model.LabelName(k).IsValid() || k == model.MetricNameLabel { + errs = append( + errs, fmt.Errorf("invalid label name: %s", k), + ) + } + + if !model.LabelValue(v).IsValid() { + errs = append( + errs, fmt.Errorf("invalid label value: %s", v), + ) + } + } + set[g.Name] = struct{}{} for i, r := range g.Rules { @@ -143,10 +157,11 @@ type RuleGroup struct { EvaluationDelay *model.Duration `yaml:"evaluation_delay,omitempty"` QueryOffset *model.Duration `yaml:"query_offset,omitempty"` - Limit int `yaml:"limit,omitempty"` - Rules []RuleNode `yaml:"rules"` - SourceTenants []string `yaml:"source_tenants,omitempty"` - AlignEvaluationTimeOnInterval bool `yaml:"align_evaluation_time_on_interval,omitempty"` + Limit int `yaml:"limit,omitempty"` + Rules []RuleNode `yaml:"rules"` + Labels map[string]string `yaml:"labels,omitempty"` + SourceTenants []string `yaml:"source_tenants,omitempty"` + AlignEvaluationTimeOnInterval bool `yaml:"align_evaluation_time_on_interval,omitempty"` } // Rule describes an alerting or recording rule. diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/interface.go b/vendor/github.com/prometheus/prometheus/model/textparse/interface.go index 0b5d9281e4d..26828552819 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/interface.go +++ b/vendor/github.com/prometheus/prometheus/model/textparse/interface.go @@ -14,6 +14,8 @@ package textparse import ( + "errors" + "fmt" "mime" "github.com/prometheus/common/model" @@ -23,8 +25,7 @@ import ( "github.com/prometheus/prometheus/model/labels" ) -// Parser parses samples from a byte slice of samples in the official -// Prometheus and OpenMetrics text exposition formats. +// Parser parses samples from a byte slice of samples in different exposition formats. 
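Both `FastRegexMatcher` and relabelling's `NewRegexp` above now anchor patterns as `^(?s:...)$` instead of `^(?:...)$`, so `.` also matches newlines inside label values. A short illustration of the behavioural difference (the pattern and input are made up):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	pattern := "foo.*"
	input := "foo\nbar" // a label value containing a newline

	oldRe := regexp.MustCompile("^(?:" + pattern + ")$")
	newRe := regexp.MustCompile("^(?s:" + pattern + ")$")

	fmt.Println(oldRe.MatchString(input)) // false: '.' does not cross '\n'
	fmt.Println(newRe.MatchString(input)) // true: with the s flag it does
}
```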
type Parser interface { // Series returns the bytes of a series with a simple float64 as a // value, the timestamp if set, and the value of the current sample. @@ -58,6 +59,8 @@ type Parser interface { // Metric writes the labels of the current sample into the passed labels. // It returns the string from which the metric was parsed. + // The values of the "le" labels of classic histograms and "quantile" labels + // of summaries should follow the OpenMetrics formatting rules. Metric(l *labels.Labels) string // Exemplar writes the exemplar of the current sample into the passed @@ -69,6 +72,8 @@ type Parser interface { // CreatedTimestamp returns the created timestamp (in milliseconds) for the // current sample. It returns nil if it is unknown e.g. if it wasn't set, // if the scrape protocol or metric type does not support created timestamps. + // Assume the CreatedTimestamp returned pointer is only valid until + // the Next iteration. CreatedTimestamp() *int64 // Next advances the parser to the next sample. @@ -76,26 +81,65 @@ type Parser interface { Next() (Entry, error) } -// New returns a new parser of the byte slice. -// -// This function always returns a valid parser, but might additionally -// return an error if the content type cannot be parsed. -func New(b []byte, contentType string, parseClassicHistograms bool, st *labels.SymbolTable) (Parser, error) { +// extractMediaType returns the mediaType of a required parser. It tries first to +// extract a valid and supported mediaType from contentType. If that fails, +// the provided fallbackType (possibly an empty string) is returned, together with +// an error. fallbackType is used as-is without further validation. +func extractMediaType(contentType, fallbackType string) (string, error) { if contentType == "" { - return NewPromParser(b, st), nil + if fallbackType == "" { + return "", errors.New("non-compliant scrape target sending blank Content-Type and no fallback_scrape_protocol specified for target") + } + return fallbackType, fmt.Errorf("non-compliant scrape target sending blank Content-Type, using fallback_scrape_protocol %q", fallbackType) } + // We have a contentType, parse it. mediaType, _, err := mime.ParseMediaType(contentType) if err != nil { - return NewPromParser(b, st), err + if fallbackType == "" { + retErr := fmt.Errorf("cannot parse Content-Type %q and no fallback_scrape_protocol for target", contentType) + return "", errors.Join(retErr, err) + } + retErr := fmt.Errorf("could not parse received Content-Type %q, using fallback_scrape_protocol %q", contentType, fallbackType) + return fallbackType, errors.Join(retErr, err) + } + + // We have a valid media type, either we recognise it and can use it + // or we have to error. + switch mediaType { + case "application/openmetrics-text", "application/vnd.google.protobuf", "text/plain": + return mediaType, nil + } + // We're here because we have no recognised mediaType. + if fallbackType == "" { + return "", fmt.Errorf("received unsupported Content-Type %q and no fallback_scrape_protocol specified for target", contentType) } + return fallbackType, fmt.Errorf("received unsupported Content-Type %q, using fallback_scrape_protocol %q", contentType, fallbackType) +} + +// New returns a new parser of the byte slice. +// +// This function no longer guarantees to return a valid parser. +// +// It only returns a valid parser if the supplied contentType and fallbackType allow. 
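With the `extractMediaType` helper above, `textparse.New` no longer falls back silently to the Prometheus text parser: it resolves a media type from the Content-Type header, then from the configured `fallback_scrape_protocol`, and otherwise fails the scrape. A simplified, self-contained sketch of that decision (the function name and error wording here are illustrative, not the vendored API):

```go
package main

import (
	"errors"
	"fmt"
	"mime"
)

var supported = map[string]bool{
	"application/openmetrics-text":    true,
	"application/vnd.google.protobuf": true,
	"text/plain":                      true,
}

// resolveMediaType mirrors the shape of the decision: prefer a valid,
// supported Content-Type; otherwise use the fallback (returning an error
// to surface as a warning); otherwise report an unusable target.
func resolveMediaType(contentType, fallback string) (string, error) {
	if contentType == "" {
		if fallback == "" {
			return "", errors.New("blank Content-Type and no fallback_scrape_protocol")
		}
		return fallback, fmt.Errorf("blank Content-Type, using fallback %q", fallback)
	}
	mediaType, _, err := mime.ParseMediaType(contentType)
	if err == nil && supported[mediaType] {
		return mediaType, nil
	}
	if fallback == "" {
		return "", fmt.Errorf("unusable Content-Type %q and no fallback_scrape_protocol", contentType)
	}
	return fallback, fmt.Errorf("unusable Content-Type %q, using fallback %q", contentType, fallback)
}

func main() {
	mt, err := resolveMediaType("", "text/plain")
	fmt.Println(mt, err) // text/plain plus a non-nil "warning" error
}
```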
+// An error may also be returned if fallbackType had to be used or there was some +// other error parsing the supplied Content-Type. +// If the returned parser is nil then the scrape must fail. +func New(b []byte, contentType, fallbackType string, parseClassicHistograms, skipOMCTSeries bool, st *labels.SymbolTable) (Parser, error) { + mediaType, err := extractMediaType(contentType, fallbackType) + // err may be nil or something we want to warn about. + switch mediaType { case "application/openmetrics-text": - return NewOpenMetricsParser(b, st), nil + return NewOpenMetricsParser(b, st, func(o *openMetricsParserOptions) { + o.SkipCTSeries = skipOMCTSeries + }), err case "application/vnd.google.protobuf": - return NewProtobufParser(b, parseClassicHistograms, st), nil + return NewProtobufParser(b, parseClassicHistograms, st), err + case "text/plain": + return NewPromParser(b, st), err default: - return NewPromParser(b, st), nil + return nil, err } } diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/nhcbparse.go b/vendor/github.com/prometheus/prometheus/model/textparse/nhcbparse.go new file mode 100644 index 00000000000..d019c327c37 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/model/textparse/nhcbparse.go @@ -0,0 +1,376 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package textparse + +import ( + "errors" + "io" + "math" + "strconv" + "strings" + + "github.com/prometheus/common/model" + + "github.com/prometheus/prometheus/model/exemplar" + "github.com/prometheus/prometheus/model/histogram" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/util/convertnhcb" +) + +type collectionState int + +const ( + stateStart collectionState = iota + stateCollecting + stateEmitting +) + +// The NHCBParser wraps a Parser and converts classic histograms to native +// histograms with custom buckets. +// +// Since Parser interface is line based, this parser needs to keep track +// of the last classic histogram series it saw to collate them into a +// single native histogram. +// +// Note: +// - Only series that have the histogram metadata type are considered for +// conversion. +// - The classic series are also returned if keepClassicHistograms is true. +type NHCBParser struct { + // The parser we're wrapping. + parser Parser + // Option to keep classic histograms along with converted histograms. + keepClassicHistograms bool + + // Labels builder. + builder labels.ScratchBuilder + + // State of the parser. + state collectionState + + // Caches the values from the underlying parser. + // For Series and Histogram. + bytes []byte + ts *int64 + value float64 + h *histogram.Histogram + fh *histogram.FloatHistogram + // For Metric. + lset labels.Labels + metricString string + // For Type. + bName []byte + typ model.MetricType + + // Caches the entry itself if we are inserting a converted NHCB + // halfway through. 
+ entry Entry + err error + + // Caches the values and metric for the inserted converted NHCB. + bytesNHCB []byte + hNHCB *histogram.Histogram + fhNHCB *histogram.FloatHistogram + lsetNHCB labels.Labels + exemplars []exemplar.Exemplar + ctNHCB *int64 + metricStringNHCB string + + // Collates values from the classic histogram series to build + // the converted histogram later. + tempLsetNHCB labels.Labels + tempNHCB convertnhcb.TempHistogram + tempExemplars []exemplar.Exemplar + tempExemplarCount int + tempCT *int64 + + // Remembers the last base histogram metric name (assuming it's + // a classic histogram) so we can tell if the next float series + // is part of the same classic histogram. + lastHistogramName string + lastHistogramLabelsHash uint64 + lastHistogramExponential bool + // Reused buffer for hashing labels. + hBuffer []byte +} + +func NewNHCBParser(p Parser, st *labels.SymbolTable, keepClassicHistograms bool) Parser { + return &NHCBParser{ + parser: p, + keepClassicHistograms: keepClassicHistograms, + builder: labels.NewScratchBuilderWithSymbolTable(st, 16), + tempNHCB: convertnhcb.NewTempHistogram(), + } +} + +func (p *NHCBParser) Series() ([]byte, *int64, float64) { + return p.bytes, p.ts, p.value +} + +func (p *NHCBParser) Histogram() ([]byte, *int64, *histogram.Histogram, *histogram.FloatHistogram) { + if p.state == stateEmitting { + return p.bytesNHCB, p.ts, p.hNHCB, p.fhNHCB + } + return p.bytes, p.ts, p.h, p.fh +} + +func (p *NHCBParser) Help() ([]byte, []byte) { + return p.parser.Help() +} + +func (p *NHCBParser) Type() ([]byte, model.MetricType) { + return p.bName, p.typ +} + +func (p *NHCBParser) Unit() ([]byte, []byte) { + return p.parser.Unit() +} + +func (p *NHCBParser) Comment() []byte { + return p.parser.Comment() +} + +func (p *NHCBParser) Metric(l *labels.Labels) string { + if p.state == stateEmitting { + *l = p.lsetNHCB + return p.metricStringNHCB + } + *l = p.lset + return p.metricString +} + +func (p *NHCBParser) Exemplar(ex *exemplar.Exemplar) bool { + if p.state == stateEmitting { + if len(p.exemplars) == 0 { + return false + } + *ex = p.exemplars[0] + p.exemplars = p.exemplars[1:] + return true + } + return p.parser.Exemplar(ex) +} + +func (p *NHCBParser) CreatedTimestamp() *int64 { + switch p.state { + case stateStart: + if p.entry == EntrySeries || p.entry == EntryHistogram { + return p.parser.CreatedTimestamp() + } + case stateCollecting: + return p.tempCT + case stateEmitting: + return p.ctNHCB + } + return nil +} + +func (p *NHCBParser) Next() (Entry, error) { + if p.state == stateEmitting { + p.state = stateStart + if p.entry == EntrySeries { + isNHCB := p.handleClassicHistogramSeries(p.lset) + if isNHCB && !p.keepClassicHistograms { + // Do not return the classic histogram series if it was converted to NHCB and we are not keeping classic histograms. + return p.Next() + } + } + return p.entry, p.err + } + + p.entry, p.err = p.parser.Next() + if p.err != nil { + if errors.Is(p.err, io.EOF) && p.processNHCB() { + return EntryHistogram, nil + } + return EntryInvalid, p.err + } + switch p.entry { + case EntrySeries: + p.bytes, p.ts, p.value = p.parser.Series() + p.metricString = p.parser.Metric(&p.lset) + // Check the label set to see if we can continue or need to emit the NHCB. + var isNHCB bool + if p.compareLabels() { + // Labels differ. Check if we can emit the NHCB. + if p.processNHCB() { + return EntryHistogram, nil + } + isNHCB = p.handleClassicHistogramSeries(p.lset) + } else { + // Labels are the same. Check if after an exponential histogram. 
+ if p.lastHistogramExponential { + isNHCB = false + } else { + isNHCB = p.handleClassicHistogramSeries(p.lset) + } + } + if isNHCB && !p.keepClassicHistograms { + // Do not return the classic histogram series if it was converted to NHCB and we are not keeping classic histograms. + return p.Next() + } + return p.entry, p.err + case EntryHistogram: + p.bytes, p.ts, p.h, p.fh = p.parser.Histogram() + p.metricString = p.parser.Metric(&p.lset) + p.storeExponentialLabels() + case EntryType: + p.bName, p.typ = p.parser.Type() + } + if p.processNHCB() { + return EntryHistogram, nil + } + return p.entry, p.err +} + +// Return true if labels have changed and we should emit the NHCB. +func (p *NHCBParser) compareLabels() bool { + if p.state != stateCollecting { + return false + } + if p.typ != model.MetricTypeHistogram { + // Different metric type. + return true + } + if p.lastHistogramName != convertnhcb.GetHistogramMetricBaseName(p.lset.Get(labels.MetricName)) { + // Different metric name. + return true + } + nextHash, _ := p.lset.HashWithoutLabels(p.hBuffer, labels.BucketLabel) + // Different label values. + return p.lastHistogramLabelsHash != nextHash +} + +// Save the label set of the classic histogram without suffix and bucket `le` label. +func (p *NHCBParser) storeClassicLabels() { + p.lastHistogramName = convertnhcb.GetHistogramMetricBaseName(p.lset.Get(labels.MetricName)) + p.lastHistogramLabelsHash, _ = p.lset.HashWithoutLabels(p.hBuffer, labels.BucketLabel) + p.lastHistogramExponential = false +} + +func (p *NHCBParser) storeExponentialLabels() { + p.lastHistogramName = p.lset.Get(labels.MetricName) + p.lastHistogramLabelsHash, _ = p.lset.HashWithoutLabels(p.hBuffer) + p.lastHistogramExponential = true +} + +// handleClassicHistogramSeries collates the classic histogram series to be converted to NHCB +// if it is actually a classic histogram series (and not a normal float series) and if there +// isn't already a native histogram with the same name (assuming it is always processed +// right before the classic histograms) and returns true if the collation was done. +func (p *NHCBParser) handleClassicHistogramSeries(lset labels.Labels) bool { + if p.typ != model.MetricTypeHistogram { + return false + } + mName := lset.Get(labels.MetricName) + // Sanity check to ensure that the TYPE metadata entry name is the same as the base name. 
+ if convertnhcb.GetHistogramMetricBaseName(mName) != string(p.bName) { + return false + } + switch { + case strings.HasSuffix(mName, "_bucket") && lset.Has(labels.BucketLabel): + le, err := strconv.ParseFloat(lset.Get(labels.BucketLabel), 64) + if err == nil && !math.IsNaN(le) { + p.processClassicHistogramSeries(lset, "_bucket", func(hist *convertnhcb.TempHistogram) { + hist.BucketCounts[le] = p.value + }) + return true + } + case strings.HasSuffix(mName, "_count"): + p.processClassicHistogramSeries(lset, "_count", func(hist *convertnhcb.TempHistogram) { + hist.Count = p.value + }) + return true + case strings.HasSuffix(mName, "_sum"): + p.processClassicHistogramSeries(lset, "_sum", func(hist *convertnhcb.TempHistogram) { + hist.Sum = p.value + }) + return true + } + return false +} + +func (p *NHCBParser) processClassicHistogramSeries(lset labels.Labels, suffix string, updateHist func(*convertnhcb.TempHistogram)) { + if p.state != stateCollecting { + p.storeClassicLabels() + p.tempCT = p.parser.CreatedTimestamp() + p.state = stateCollecting + } + p.tempLsetNHCB = convertnhcb.GetHistogramMetricBase(lset, suffix) + p.storeExemplars() + updateHist(&p.tempNHCB) +} + +func (p *NHCBParser) storeExemplars() { + for ex := p.nextExemplarPtr(); p.parser.Exemplar(ex); ex = p.nextExemplarPtr() { + p.tempExemplarCount++ + } +} + +func (p *NHCBParser) nextExemplarPtr() *exemplar.Exemplar { + switch { + case p.tempExemplarCount == len(p.tempExemplars)-1: + // Reuse the previously allocated exemplar, it was not filled up. + case len(p.tempExemplars) == cap(p.tempExemplars): + // Let the runtime grow the slice. + p.tempExemplars = append(p.tempExemplars, exemplar.Exemplar{}) + default: + // Take the next element into use. + p.tempExemplars = p.tempExemplars[:len(p.tempExemplars)+1] + } + return &p.tempExemplars[len(p.tempExemplars)-1] +} + +func (p *NHCBParser) swapExemplars() { + p.exemplars = p.tempExemplars[:p.tempExemplarCount] + p.tempExemplars = p.tempExemplars[:0] + p.tempExemplarCount = 0 +} + +// processNHCB converts the collated classic histogram series to NHCB and caches the info +// to be returned to callers. Retruns true if the conversion was successful. 
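Before the body of `processNHCB` below, here is a rough, self-contained illustration of the core conversion it delegates to: classic `_bucket` series carry cumulative counts keyed by `le`, while a custom-bucket native histogram stores sorted upper bounds with per-bucket counts. The real `convertnhcb` helpers additionally build spans, validate the result, and carry created timestamps; none of the names below come from that package:

```go
package main

import (
	"fmt"
	"math"
	"sort"
)

// toCustomBuckets turns cumulative classic-histogram bucket counts,
// keyed by their "le" upper bound, into sorted upper bounds plus
// per-bucket (non-cumulative) counts: the shape a native histogram
// with custom buckets stores.
func toCustomBuckets(cumulative map[float64]float64) (bounds, counts []float64) {
	for le := range cumulative {
		bounds = append(bounds, le)
	}
	sort.Float64s(bounds)

	counts = make([]float64, len(bounds))
	prev := 0.0
	for i, le := range bounds {
		counts[i] = cumulative[le] - prev
		prev = cumulative[le]
	}
	return bounds, counts
}

func main() {
	// request_duration_seconds_bucket{le="0.1"}  5
	// request_duration_seconds_bucket{le="0.5"}  8
	// request_duration_seconds_bucket{le="+Inf"} 10
	bounds, counts := toCustomBuckets(map[float64]float64{
		0.1: 5, 0.5: 8, math.Inf(1): 10,
	})
	fmt.Println(bounds) // [0.1 0.5 +Inf]
	fmt.Println(counts) // [5 3 2]
}
```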
+func (p *NHCBParser) processNHCB() bool { + if p.state != stateCollecting { + return false + } + ub := make([]float64, 0, len(p.tempNHCB.BucketCounts)) + for b := range p.tempNHCB.BucketCounts { + ub = append(ub, b) + } + upperBounds, hBase := convertnhcb.ProcessUpperBoundsAndCreateBaseHistogram(ub, false) + fhBase := hBase.ToFloat(nil) + h, fh := convertnhcb.NewHistogram(p.tempNHCB, upperBounds, hBase, fhBase) + if h != nil { + if err := h.Validate(); err != nil { + return false + } + p.hNHCB = h + p.fhNHCB = nil + } else if fh != nil { + if err := fh.Validate(); err != nil { + return false + } + p.hNHCB = nil + p.fhNHCB = fh + } + p.metricStringNHCB = p.tempLsetNHCB.Get(labels.MetricName) + strings.ReplaceAll(p.tempLsetNHCB.DropMetricName().String(), ", ", ",") + p.bytesNHCB = []byte(p.metricStringNHCB) + p.lsetNHCB = p.tempLsetNHCB + p.swapExemplars() + p.ctNHCB = p.tempCT + p.tempNHCB = convertnhcb.NewTempHistogram() + p.state = stateEmitting + p.tempCT = nil + return true +} diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go b/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go index 5f0415d3ee9..3ae9c7ddfc3 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go +++ b/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go @@ -17,13 +17,16 @@ package textparse import ( + "bytes" "errors" "fmt" "io" "math" + "strconv" "strings" "unicode/utf8" + "github.com/cespare/xxhash/v2" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/exemplar" @@ -72,15 +75,16 @@ func (l *openMetricsLexer) Error(es string) { // OpenMetrics text exposition format. // This is based on the working draft https://docs.google.com/document/u/1/d/1KwV0mAXwwbvvifBvDKH_LU1YjyXE_wxCkHNoCGq1GX0/edit type OpenMetricsParser struct { - l *openMetricsLexer - builder labels.ScratchBuilder - series []byte - text []byte - mtype model.MetricType - val float64 - ts int64 - hasTS bool - start int + l *openMetricsLexer + builder labels.ScratchBuilder + series []byte + mfNameLen int // length of metric family name to get from series. + text []byte + mtype model.MetricType + val float64 + ts int64 + hasTS bool + start int // offsets is a list of offsets into series that describe the positions // of the metric name and label names and values for this series. // p.offsets[0] is the start character of the metric name. @@ -95,7 +99,15 @@ type OpenMetricsParser struct { exemplarTs int64 hasExemplarTs bool - skipCTSeries bool + // Created timestamp parsing state. + ct int64 + ctHashSet uint64 + // ignoreExemplar instructs the parser to not overwrite exemplars (to keep them while peeking ahead). + ignoreExemplar bool + // visitedMFName is the metric family name of the last visited metric when peeking ahead + // for _created series during the execution of the CreatedTimestamp method. + visitedMFName []byte + skipCTSeries bool } type openMetricsParserOptions struct { @@ -201,7 +213,7 @@ func (p *OpenMetricsParser) Metric(l *labels.Labels) string { label := unreplace(s[a:b]) c := p.offsets[i+2] - p.start d := p.offsets[i+3] - p.start - value := unreplace(s[c:d]) + value := normalizeFloatsInLabelValues(p.mtype, label, unreplace(s[c:d])) p.builder.Add(label, value) } @@ -252,87 +264,144 @@ func (p *OpenMetricsParser) Exemplar(e *exemplar.Exemplar) bool { // CreatedTimestamp returns the created timestamp for a current Metric if exists or nil. 
// NOTE(Maniktherana): Might use additional CPU/mem resources due to deep copy of parser required for peeking given 1.0 OM specification on _created series. func (p *OpenMetricsParser) CreatedTimestamp() *int64 { - if !TypeRequiresCT(p.mtype) { + if !typeRequiresCT(p.mtype) { // Not a CT supported metric type, fast path. + p.ctHashSet = 0 // Use ctHashSet as a single way of telling "empty cache" return nil } var ( - currLset labels.Labels - buf []byte - peekWithoutNameLsetHash uint64 + buf []byte + currName []byte ) - p.Metric(&currLset) - currFamilyLsetHash, buf := currLset.HashWithoutLabels(buf, labels.MetricName, "le", "quantile") - // Search for the _created line for the currFamilyLsetHash using ephemeral parser until - // we see EOF or new metric family. We have to do it as we don't know where (and if) - // that CT line is. - // TODO(bwplotka): Make sure OM 1.1/2.0 pass CT via metadata or exemplar-like to avoid this. - peek := deepCopy(p) + if len(p.series) > 1 && p.series[0] == '{' && p.series[1] == '"' { + // special case for UTF-8 encoded metric family names. + currName = p.series[p.offsets[0]-p.start : p.mfNameLen+2] + } else { + currName = p.series[p.offsets[0]-p.start : p.mfNameLen] + } + + currHash := p.seriesHash(&buf, currName) + // Check cache, perhaps we fetched something already. + if currHash == p.ctHashSet && p.ct > 0 { + return &p.ct + } + + // Create a new lexer to reset the parser once this function is done executing. + resetLexer := &openMetricsLexer{ + b: p.l.b, + i: p.l.i, + start: p.l.start, + err: p.l.err, + state: p.l.state, + } + + p.skipCTSeries = false + + p.ignoreExemplar = true + savedStart := p.start + defer func() { + p.ignoreExemplar = false + p.start = savedStart + p.l = resetLexer + }() + for { - eType, err := peek.Next() + eType, err := p.Next() if err != nil { - // This means peek will give error too later on, so def no CT line found. + // This means p.Next() will give error too later on, so def no CT line found. // This might result in partial scrape with wrong/missing CT, but only // spec improvement would help. - // TODO(bwplotka): Make sure OM 1.1/2.0 pass CT via metadata or exemplar-like to avoid this. + // TODO: Make sure OM 1.1/2.0 pass CT via metadata or exemplar-like to avoid this. + p.resetCTParseValues() return nil } if eType != EntrySeries { // Assume we hit different family, no CT line found. + p.resetCTParseValues() return nil } - var peekedLset labels.Labels - peek.Metric(&peekedLset) - peekedName := peekedLset.Get(model.MetricNameLabel) - if !strings.HasSuffix(peekedName, "_created") { + peekedName := p.series[p.offsets[0]-p.start : p.offsets[1]-p.start] + if len(peekedName) < 8 || string(peekedName[len(peekedName)-8:]) != "_created" { // Not a CT line, search more. continue } - // We got a CT line here, but let's search if CT line is actually for our series, edge case. - peekWithoutNameLsetHash, _ = peekedLset.HashWithoutLabels(buf, labels.MetricName, "le", "quantile") - if peekWithoutNameLsetHash != currFamilyLsetHash { - // CT line for a different series, for our series no CT. + // Remove _created suffix. + peekedHash := p.seriesHash(&buf, peekedName[:len(peekedName)-8]) + if peekedHash != currHash { + // Found CT line for a different series, for our series no CT. + p.resetCTParseValues() return nil } - ct := int64(peek.val) + + // All timestamps in OpenMetrics are Unix Epoch in seconds. Convert to milliseconds. 
+ // https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#timestamps + ct := int64(p.val * 1000.0) + p.setCTParseValues(ct, currHash, currName, true) return &ct } } -// TypeRequiresCT returns true if the metric type requires a _created timestamp. -func TypeRequiresCT(t model.MetricType) bool { - switch t { - case model.MetricTypeCounter, model.MetricTypeSummary, model.MetricTypeHistogram: - return true - default: - return false +var ( + leBytes = []byte{108, 101} + quantileBytes = []byte{113, 117, 97, 110, 116, 105, 108, 101} +) + +// seriesHash generates a hash based on the metric family name and the offsets +// of label names and values from the parsed OpenMetrics data. It skips quantile +// and le labels for summaries and histograms respectively. +func (p *OpenMetricsParser) seriesHash(offsetsArr *[]byte, metricFamilyName []byte) uint64 { + // Iterate through p.offsets to find the label names and values. + for i := 2; i < len(p.offsets); i += 4 { + lStart := p.offsets[i] - p.start + lEnd := p.offsets[i+1] - p.start + label := p.series[lStart:lEnd] + // Skip quantile and le labels for summaries and histograms. + if p.mtype == model.MetricTypeSummary && bytes.Equal(label, quantileBytes) { + continue + } + if p.mtype == model.MetricTypeHistogram && bytes.Equal(label, leBytes) { + continue + } + *offsetsArr = append(*offsetsArr, p.series[lStart:lEnd]...) + vStart := p.offsets[i+2] - p.start + vEnd := p.offsets[i+3] - p.start + *offsetsArr = append(*offsetsArr, p.series[vStart:vEnd]...) } + + *offsetsArr = append(*offsetsArr, metricFamilyName...) + hashedOffsets := xxhash.Sum64(*offsetsArr) + + // Reset the offsets array for later reuse. + *offsetsArr = (*offsetsArr)[:0] + return hashedOffsets } -// deepCopy creates a copy of a parser without re-using the slices' original memory addresses. -func deepCopy(p *OpenMetricsParser) OpenMetricsParser { - newB := make([]byte, len(p.l.b)) - copy(newB, p.l.b) +// setCTParseValues sets the parser to the state after CreatedTimestamp method was called and CT was found. +// This is useful to prevent re-parsing the same series again and early return the CT value. +func (p *OpenMetricsParser) setCTParseValues(ct int64, ctHashSet uint64, mfName []byte, skipCTSeries bool) { + p.ct = ct + p.ctHashSet = ctHashSet + p.visitedMFName = mfName + p.skipCTSeries = skipCTSeries // Do we need to set it? +} - newLexer := &openMetricsLexer{ - b: newB, - i: p.l.i, - start: p.l.start, - err: p.l.err, - state: p.l.state, - } +// resetCtParseValues resets the parser to the state before CreatedTimestamp method was called. +func (p *OpenMetricsParser) resetCTParseValues() { + p.ctHashSet = 0 + p.skipCTSeries = true +} - newParser := OpenMetricsParser{ - l: newLexer, - builder: p.builder, - mtype: p.mtype, - val: p.val, - skipCTSeries: false, +// typeRequiresCT returns true if the metric type requires a _created timestamp. +func typeRequiresCT(t model.MetricType) bool { + switch t { + case model.MetricTypeCounter, model.MetricTypeSummary, model.MetricTypeHistogram: + return true + default: + return false } - return newParser } // nextToken returns the next token from the openMetricsLexer. 
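The `CreatedTimestamp` rework above also fixes the unit: OpenMetrics `_created` samples are seconds since the Unix epoch (possibly fractional), while Prometheus tracks created timestamps in milliseconds, hence `ct := int64(p.val * 1000.0)`. A trivial worked example:

```go
package main

import "fmt"

func main() {
	// _created value as exposed by the target, in seconds.
	createdSeconds := 1716200000.25
	// Stored created timestamp, in milliseconds.
	ct := int64(createdSeconds * 1000.0)
	fmt.Println(ct) // 1716200000250
}
```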
@@ -356,10 +425,12 @@ func (p *OpenMetricsParser) Next() (Entry, error) { p.start = p.l.i p.offsets = p.offsets[:0] - p.eOffsets = p.eOffsets[:0] - p.exemplar = p.exemplar[:0] - p.exemplarVal = 0 - p.hasExemplarTs = false + if !p.ignoreExemplar { + p.eOffsets = p.eOffsets[:0] + p.exemplar = p.exemplar[:0] + p.exemplarVal = 0 + p.hasExemplarTs = false + } switch t := p.nextToken(); t { case tEOFWord: @@ -378,6 +449,7 @@ func (p *OpenMetricsParser) Next() (Entry, error) { mStart++ mEnd-- } + p.mfNameLen = mEnd - mStart p.offsets = append(p.offsets, mStart, mEnd) default: return EntryInvalid, p.parseError("expected metric name after "+t.String(), t2) @@ -483,6 +555,16 @@ func (p *OpenMetricsParser) Next() (Entry, error) { func (p *OpenMetricsParser) parseComment() error { var err error + + if p.ignoreExemplar { + for t := p.nextToken(); t != tLinebreak; t = p.nextToken() { + if t == tEOF { + return errors.New("data does not end with # EOF") + } + } + return nil + } + // Parse the labels. p.eOffsets, err = p.parseLVals(p.eOffsets, true) if err != nil { @@ -591,10 +673,9 @@ func (p *OpenMetricsParser) parseLVals(offsets []int, isExemplar bool) ([]int, e // isCreatedSeries returns true if the current series is a _created series. func (p *OpenMetricsParser) isCreatedSeries() bool { - var newLbs labels.Labels - p.Metric(&newLbs) - name := newLbs.Get(model.MetricNameLabel) - if TypeRequiresCT(p.mtype) && strings.HasSuffix(name, "_created") { + metricName := p.series[p.offsets[0]-p.start : p.offsets[1]-p.start] + // check length so the metric is longer than len("_created") + if typeRequiresCT(p.mtype) && len(metricName) >= 8 && string(metricName[len(metricName)-8:]) == "_created" { return true } return false @@ -663,3 +744,15 @@ func (p *OpenMetricsParser) getFloatValue(t token, after string) (float64, error } return val, nil } + +// normalizeFloatsInLabelValues ensures that values of the "le" labels of classic histograms and "quantile" labels +// of summaries follow OpenMetrics formatting rules. 
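The helper defined just below parses and re-emits `le`/`quantile` values so that the same bucket or quantile looks identical no matter how the target formatted the float. A simplified stand-in showing the intended effect; the exact canonical form produced by the vendored `formatOpenMetricsFloat` is an assumption here, only the parse-and-reformat idea is:

```go
package main

import (
	"fmt"
	"math"
	"strconv"
	"strings"
)

// normalizeLE is a simplified stand-in for the normalization applied to
// "le"/"quantile" label values: parse the float and re-emit it in one
// canonical form so "0.100", "1e-1" and "0.1" all compare equal.
func normalizeLE(v string) string {
	f, err := strconv.ParseFloat(v, 64)
	if err != nil {
		return v
	}
	s := strconv.FormatFloat(f, 'g', -1, 64)
	// OpenMetrics floats carry a '.' or an exponent, e.g. "1" becomes "1.0".
	if !strings.ContainsAny(s, ".eE") && !math.IsInf(f, 0) && !math.IsNaN(f) {
		s += ".0"
	}
	return s
}

func main() {
	fmt.Println(normalizeLE("0.100")) // 0.1
	fmt.Println(normalizeLE("1e-1"))  // 0.1
	fmt.Println(normalizeLE("1"))     // 1.0
	fmt.Println(normalizeLE("+Inf"))  // +Inf
}
```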
+func normalizeFloatsInLabelValues(t model.MetricType, l, v string) string { + if (t == model.MetricTypeSummary && l == model.QuantileLabel) || (t == model.MetricTypeHistogram && l == model.BucketLabel) { + f, err := strconv.ParseFloat(v, 64) + if err == nil { + return formatOpenMetricsFloat(f) + } + } + return v +} diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go b/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go index a611f3aea76..0ab932c665b 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go +++ b/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go @@ -239,7 +239,8 @@ func (p *PromParser) Metric(l *labels.Labels) string { label := unreplace(s[a:b]) c := p.offsets[i+2] - p.start d := p.offsets[i+3] - p.start - value := unreplace(s[c:d]) + value := normalizeFloatsInLabelValues(p.mtype, label, unreplace(s[c:d])) + p.builder.Add(label, value) } @@ -502,7 +503,7 @@ func unreplace(s string) string { } func yoloString(b []byte) string { - return *((*string)(unsafe.Pointer(&b))) + return unsafe.String(unsafe.SliceData(b), len(b)) } func parseFloat(s string) (float64, error) { diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/promtestdata.nometa.txt b/vendor/github.com/prometheus/prometheus/model/textparse/promtestdata.nometa.txt deleted file mode 100644 index 235f0aa464b..00000000000 --- a/vendor/github.com/prometheus/prometheus/model/textparse/promtestdata.nometa.txt +++ /dev/null @@ -1,411 +0,0 @@ -go_gc_duration_seconds{quantile="0"} 4.9351e-05 -go_gc_duration_seconds{quantile="0.25"} 7.424100000000001e-05 -go_gc_duration_seconds{quantile="0.5"} 8.3835e-05 -go_gc_duration_seconds{quantile="0.75"} 0.000106744 -go_gc_duration_seconds{quantile="1"} 0.002072195 -go_gc_duration_seconds_sum 0.012139815 -go_gc_duration_seconds_count 99 -go_goroutines 33 -go_memstats_alloc_bytes 1.7518624e+07 -go_memstats_alloc_bytes_total 8.3062296e+08 -go_memstats_buck_hash_sys_bytes 1.494637e+06 -go_memstats_frees_total 4.65658e+06 -go_memstats_gc_sys_bytes 1.107968e+06 -go_memstats_heap_alloc_bytes 1.7518624e+07 -go_memstats_heap_idle_bytes 6.668288e+06 -go_memstats_heap_inuse_bytes 1.8956288e+07 -go_memstats_heap_objects 72755 -go_memstats_heap_released_bytes_total 0 -go_memstats_heap_sys_bytes 2.5624576e+07 -go_memstats_last_gc_time_seconds 1.4843955586166437e+09 -go_memstats_lookups_total 2089 -go_memstats_mallocs_total 4.729335e+06 -go_memstats_mcache_inuse_bytes 9600 -go_memstats_mcache_sys_bytes 16384 -go_memstats_mspan_inuse_bytes 211520 -go_memstats_mspan_sys_bytes 245760 -go_memstats_next_gc_bytes 2.033527e+07 -go_memstats_other_sys_bytes 2.077323e+06 -go_memstats_stack_inuse_bytes 1.6384e+06 -go_memstats_stack_sys_bytes 1.6384e+06 -go_memstats_sys_bytes 3.2205048e+07 -http_request_duration_microseconds{handler="alerts",quantile="0.5"} NaN -http_request_duration_microseconds{handler="alerts",quantile="0.9"} NaN -http_request_duration_microseconds{handler="alerts",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="alerts"} 0 -http_request_duration_microseconds_count{handler="alerts"} 0 -http_request_duration_microseconds{handler="config",quantile="0.5"} NaN -http_request_duration_microseconds{handler="config",quantile="0.9"} NaN -http_request_duration_microseconds{handler="config",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="config"} 0 -http_request_duration_microseconds_count{handler="config"} 0 
-http_request_duration_microseconds{handler="consoles",quantile="0.5"} NaN -http_request_duration_microseconds{handler="consoles",quantile="0.9"} NaN -http_request_duration_microseconds{handler="consoles",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="consoles"} 0 -http_request_duration_microseconds_count{handler="consoles"} 0 -http_request_duration_microseconds{handler="drop_series",quantile="0.5"} NaN -http_request_duration_microseconds{handler="drop_series",quantile="0.9"} NaN -http_request_duration_microseconds{handler="drop_series",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="drop_series"} 0 -http_request_duration_microseconds_count{handler="drop_series"} 0 -http_request_duration_microseconds{handler="federate",quantile="0.5"} NaN -http_request_duration_microseconds{handler="federate",quantile="0.9"} NaN -http_request_duration_microseconds{handler="federate",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="federate"} 0 -http_request_duration_microseconds_count{handler="federate"} 0 -http_request_duration_microseconds{handler="flags",quantile="0.5"} NaN -http_request_duration_microseconds{handler="flags",quantile="0.9"} NaN -http_request_duration_microseconds{handler="flags",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="flags"} 0 -http_request_duration_microseconds_count{handler="flags"} 0 -http_request_duration_microseconds{handler="graph",quantile="0.5"} 771.655 -http_request_duration_microseconds{handler="graph",quantile="0.9"} 1761.823 -http_request_duration_microseconds{handler="graph",quantile="0.99"} 1761.823 -http_request_duration_microseconds_sum{handler="graph"} 5803.93 -http_request_duration_microseconds_count{handler="graph"} 3 -http_request_duration_microseconds{handler="heap",quantile="0.5"} NaN -http_request_duration_microseconds{handler="heap",quantile="0.9"} NaN -http_request_duration_microseconds{handler="heap",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="heap"} 0 -http_request_duration_microseconds_count{handler="heap"} 0 -http_request_duration_microseconds{handler="label_values",quantile="0.5"} 325.401 -http_request_duration_microseconds{handler="label_values",quantile="0.9"} 414.708 -http_request_duration_microseconds{handler="label_values",quantile="0.99"} 414.708 -http_request_duration_microseconds_sum{handler="label_values"} 3995.574 -http_request_duration_microseconds_count{handler="label_values"} 3 -http_request_duration_microseconds{handler="options",quantile="0.5"} NaN -http_request_duration_microseconds{handler="options",quantile="0.9"} NaN -http_request_duration_microseconds{handler="options",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="options"} 0 -http_request_duration_microseconds_count{handler="options"} 0 -http_request_duration_microseconds{handler="prometheus",quantile="0.5"} 1351.859 -http_request_duration_microseconds{handler="prometheus",quantile="0.9"} 1714.035 -http_request_duration_microseconds{handler="prometheus",quantile="0.99"} 2833.523 -http_request_duration_microseconds_sum{handler="prometheus"} 661851.54 -http_request_duration_microseconds_count{handler="prometheus"} 462 -http_request_duration_microseconds{handler="query",quantile="0.5"} 3885.448 -http_request_duration_microseconds{handler="query",quantile="0.9"} 4390.558 -http_request_duration_microseconds{handler="query",quantile="0.99"} 4390.558 -http_request_duration_microseconds_sum{handler="query"} 26074.11 
-http_request_duration_microseconds_count{handler="query"} 6 -http_request_duration_microseconds{handler="query_range",quantile="0.5"} NaN -http_request_duration_microseconds{handler="query_range",quantile="0.9"} NaN -http_request_duration_microseconds{handler="query_range",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="query_range"} 0 -http_request_duration_microseconds_count{handler="query_range"} 0 -http_request_duration_microseconds{handler="rules",quantile="0.5"} NaN -http_request_duration_microseconds{handler="rules",quantile="0.9"} NaN -http_request_duration_microseconds{handler="rules",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="rules"} 0 -http_request_duration_microseconds_count{handler="rules"} 0 -http_request_duration_microseconds{handler="series",quantile="0.5"} NaN -http_request_duration_microseconds{handler="series",quantile="0.9"} NaN -http_request_duration_microseconds{handler="series",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="series"} 0 -http_request_duration_microseconds_count{handler="series"} 0 -http_request_duration_microseconds{handler="static",quantile="0.5"} 212.311 -http_request_duration_microseconds{handler="static",quantile="0.9"} 265.174 -http_request_duration_microseconds{handler="static",quantile="0.99"} 265.174 -http_request_duration_microseconds_sum{handler="static"} 6458.621 -http_request_duration_microseconds_count{handler="static"} 3 -http_request_duration_microseconds{handler="status",quantile="0.5"} NaN -http_request_duration_microseconds{handler="status",quantile="0.9"} NaN -http_request_duration_microseconds{handler="status",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="status"} 0 -http_request_duration_microseconds_count{handler="status"} 0 -http_request_duration_microseconds{handler="targets",quantile="0.5"} NaN -http_request_duration_microseconds{handler="targets",quantile="0.9"} NaN -http_request_duration_microseconds{handler="targets",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="targets"} 0 -http_request_duration_microseconds_count{handler="targets"} 0 -http_request_duration_microseconds{handler="version",quantile="0.5"} NaN -http_request_duration_microseconds{handler="version",quantile="0.9"} NaN -http_request_duration_microseconds{handler="version",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="version"} 0 -http_request_duration_microseconds_count{handler="version"} 0 -http_request_size_bytes{handler="alerts",quantile="0.5"} NaN -http_request_size_bytes{handler="alerts",quantile="0.9"} NaN -http_request_size_bytes{handler="alerts",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="alerts"} 0 -http_request_size_bytes_count{handler="alerts"} 0 -http_request_size_bytes{handler="config",quantile="0.5"} NaN -http_request_size_bytes{handler="config",quantile="0.9"} NaN -http_request_size_bytes{handler="config",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="config"} 0 -http_request_size_bytes_count{handler="config"} 0 -http_request_size_bytes{handler="consoles",quantile="0.5"} NaN -http_request_size_bytes{handler="consoles",quantile="0.9"} NaN -http_request_size_bytes{handler="consoles",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="consoles"} 0 -http_request_size_bytes_count{handler="consoles"} 0 -http_request_size_bytes{handler="drop_series",quantile="0.5"} NaN -http_request_size_bytes{handler="drop_series",quantile="0.9"} NaN 
-http_request_size_bytes{handler="drop_series",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="drop_series"} 0 -http_request_size_bytes_count{handler="drop_series"} 0 -http_request_size_bytes{handler="federate",quantile="0.5"} NaN -http_request_size_bytes{handler="federate",quantile="0.9"} NaN -http_request_size_bytes{handler="federate",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="federate"} 0 -http_request_size_bytes_count{handler="federate"} 0 -http_request_size_bytes{handler="flags",quantile="0.5"} NaN -http_request_size_bytes{handler="flags",quantile="0.9"} NaN -http_request_size_bytes{handler="flags",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="flags"} 0 -http_request_size_bytes_count{handler="flags"} 0 -http_request_size_bytes{handler="graph",quantile="0.5"} 367 -http_request_size_bytes{handler="graph",quantile="0.9"} 389 -http_request_size_bytes{handler="graph",quantile="0.99"} 389 -http_request_size_bytes_sum{handler="graph"} 1145 -http_request_size_bytes_count{handler="graph"} 3 -http_request_size_bytes{handler="heap",quantile="0.5"} NaN -http_request_size_bytes{handler="heap",quantile="0.9"} NaN -http_request_size_bytes{handler="heap",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="heap"} 0 -http_request_size_bytes_count{handler="heap"} 0 -http_request_size_bytes{handler="label_values",quantile="0.5"} 416 -http_request_size_bytes{handler="label_values",quantile="0.9"} 416 -http_request_size_bytes{handler="label_values",quantile="0.99"} 416 -http_request_size_bytes_sum{handler="label_values"} 1248 -http_request_size_bytes_count{handler="label_values"} 3 -http_request_size_bytes{handler="options",quantile="0.5"} NaN -http_request_size_bytes{handler="options",quantile="0.9"} NaN -http_request_size_bytes{handler="options",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="options"} 0 -http_request_size_bytes_count{handler="options"} 0 -http_request_size_bytes{handler="prometheus",quantile="0.5"} 238 -http_request_size_bytes{handler="prometheus",quantile="0.9"} 238 -http_request_size_bytes{handler="prometheus",quantile="0.99"} 238 -http_request_size_bytes_sum{handler="prometheus"} 109956 -http_request_size_bytes_count{handler="prometheus"} 462 -http_request_size_bytes{handler="query",quantile="0.5"} 531 -http_request_size_bytes{handler="query",quantile="0.9"} 531 -http_request_size_bytes{handler="query",quantile="0.99"} 531 -http_request_size_bytes_sum{handler="query"} 3186 -http_request_size_bytes_count{handler="query"} 6 -http_request_size_bytes{handler="query_range",quantile="0.5"} NaN -http_request_size_bytes{handler="query_range",quantile="0.9"} NaN -http_request_size_bytes{handler="query_range",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="query_range"} 0 -http_request_size_bytes_count{handler="query_range"} 0 -http_request_size_bytes{handler="rules",quantile="0.5"} NaN -http_request_size_bytes{handler="rules",quantile="0.9"} NaN -http_request_size_bytes{handler="rules",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="rules"} 0 -http_request_size_bytes_count{handler="rules"} 0 -http_request_size_bytes{handler="series",quantile="0.5"} NaN -http_request_size_bytes{handler="series",quantile="0.9"} NaN -http_request_size_bytes{handler="series",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="series"} 0 -http_request_size_bytes_count{handler="series"} 0 -http_request_size_bytes{handler="static",quantile="0.5"} 379 -http_request_size_bytes{handler="static",quantile="0.9"} 379 
-http_request_size_bytes{handler="static",quantile="0.99"} 379 -http_request_size_bytes_sum{handler="static"} 1137 -http_request_size_bytes_count{handler="static"} 3 -http_request_size_bytes{handler="status",quantile="0.5"} NaN -http_request_size_bytes{handler="status",quantile="0.9"} NaN -http_request_size_bytes{handler="status",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="status"} 0 -http_request_size_bytes_count{handler="status"} 0 -http_request_size_bytes{handler="targets",quantile="0.5"} NaN -http_request_size_bytes{handler="targets",quantile="0.9"} NaN -http_request_size_bytes{handler="targets",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="targets"} 0 -http_request_size_bytes_count{handler="targets"} 0 -http_request_size_bytes{handler="version",quantile="0.5"} NaN -http_request_size_bytes{handler="version",quantile="0.9"} NaN -http_request_size_bytes{handler="version",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="version"} 0 -http_request_size_bytes_count{handler="version"} 0 -http_requests_total{code="200",handler="graph",method="get"} 3 -http_requests_total{code="200",handler="label_values",method="get"} 3 -http_requests_total{code="200",handler="prometheus",method="get"} 462 -http_requests_total{code="200",handler="query",method="get"} 6 -http_requests_total{code="200",handler="static",method="get"} 3 -http_response_size_bytes{handler="alerts",quantile="0.5"} NaN -http_response_size_bytes{handler="alerts",quantile="0.9"} NaN -http_response_size_bytes{handler="alerts",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="alerts"} 0 -http_response_size_bytes_count{handler="alerts"} 0 -http_response_size_bytes{handler="config",quantile="0.5"} NaN -http_response_size_bytes{handler="config",quantile="0.9"} NaN -http_response_size_bytes{handler="config",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="config"} 0 -http_response_size_bytes_count{handler="config"} 0 -http_response_size_bytes{handler="consoles",quantile="0.5"} NaN -http_response_size_bytes{handler="consoles",quantile="0.9"} NaN -http_response_size_bytes{handler="consoles",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="consoles"} 0 -http_response_size_bytes_count{handler="consoles"} 0 -http_response_size_bytes{handler="drop_series",quantile="0.5"} NaN -http_response_size_bytes{handler="drop_series",quantile="0.9"} NaN -http_response_size_bytes{handler="drop_series",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="drop_series"} 0 -http_response_size_bytes_count{handler="drop_series"} 0 -http_response_size_bytes{handler="federate",quantile="0.5"} NaN -http_response_size_bytes{handler="federate",quantile="0.9"} NaN -http_response_size_bytes{handler="federate",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="federate"} 0 -http_response_size_bytes_count{handler="federate"} 0 -http_response_size_bytes{handler="flags",quantile="0.5"} NaN -http_response_size_bytes{handler="flags",quantile="0.9"} NaN -http_response_size_bytes{handler="flags",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="flags"} 0 -http_response_size_bytes_count{handler="flags"} 0 -http_response_size_bytes{handler="graph",quantile="0.5"} 3619 -http_response_size_bytes{handler="graph",quantile="0.9"} 3619 -http_response_size_bytes{handler="graph",quantile="0.99"} 3619 -http_response_size_bytes_sum{handler="graph"} 10857 -http_response_size_bytes_count{handler="graph"} 3 -http_response_size_bytes{handler="heap",quantile="0.5"} NaN 
-http_response_size_bytes{handler="heap",quantile="0.9"} NaN -http_response_size_bytes{handler="heap",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="heap"} 0 -http_response_size_bytes_count{handler="heap"} 0 -http_response_size_bytes{handler="label_values",quantile="0.5"} 642 -http_response_size_bytes{handler="label_values",quantile="0.9"} 642 -http_response_size_bytes{handler="label_values",quantile="0.99"} 642 -http_response_size_bytes_sum{handler="label_values"} 1926 -http_response_size_bytes_count{handler="label_values"} 3 -http_response_size_bytes{handler="options",quantile="0.5"} NaN -http_response_size_bytes{handler="options",quantile="0.9"} NaN -http_response_size_bytes{handler="options",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="options"} 0 -http_response_size_bytes_count{handler="options"} 0 -http_response_size_bytes{handler="prometheus",quantile="0.5"} 3033 -http_response_size_bytes{handler="prometheus",quantile="0.9"} 3123 -http_response_size_bytes{handler="prometheus",quantile="0.99"} 3128 -http_response_size_bytes_sum{handler="prometheus"} 1.374097e+06 -http_response_size_bytes_count{handler="prometheus"} 462 -http_response_size_bytes{handler="query",quantile="0.5"} 776 -http_response_size_bytes{handler="query",quantile="0.9"} 781 -http_response_size_bytes{handler="query",quantile="0.99"} 781 -http_response_size_bytes_sum{handler="query"} 4656 -http_response_size_bytes_count{handler="query"} 6 -http_response_size_bytes{handler="query_range",quantile="0.5"} NaN -http_response_size_bytes{handler="query_range",quantile="0.9"} NaN -http_response_size_bytes{handler="query_range",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="query_range"} 0 -http_response_size_bytes_count{handler="query_range"} 0 -http_response_size_bytes{handler="rules",quantile="0.5"} NaN -http_response_size_bytes{handler="rules",quantile="0.9"} NaN -http_response_size_bytes{handler="rules",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="rules"} 0 -http_response_size_bytes_count{handler="rules"} 0 -http_response_size_bytes{handler="series",quantile="0.5"} NaN -http_response_size_bytes{handler="series",quantile="0.9"} NaN -http_response_size_bytes{handler="series",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="series"} 0 -http_response_size_bytes_count{handler="series"} 0 -http_response_size_bytes{handler="static",quantile="0.5"} 6316 -http_response_size_bytes{handler="static",quantile="0.9"} 6316 -http_response_size_bytes{handler="static",quantile="0.99"} 6316 -http_response_size_bytes_sum{handler="static"} 18948 -http_response_size_bytes_count{handler="static"} 3 -http_response_size_bytes{handler="status",quantile="0.5"} NaN -http_response_size_bytes{handler="status",quantile="0.9"} NaN -http_response_size_bytes{handler="status",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="status"} 0 -http_response_size_bytes_count{handler="status"} 0 -http_response_size_bytes{handler="targets",quantile="0.5"} NaN -http_response_size_bytes{handler="targets",quantile="0.9"} NaN -http_response_size_bytes{handler="targets",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="targets"} 0 -http_response_size_bytes_count{handler="targets"} 0 -http_response_size_bytes{handler="version",quantile="0.5"} NaN -http_response_size_bytes{handler="version",quantile="0.9"} NaN -http_response_size_bytes{handler="version",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="version"} 0 -http_response_size_bytes_count{handler="version"} 0 
-prometheus_build_info{branch="",goversion="go1.7.3",revision="",version=""} 1 -prometheus_config_last_reload_success_timestamp_seconds 1.484395547e+09 -prometheus_config_last_reload_successful 1 -prometheus_evaluator_duration_seconds{quantile="0.01"} 1.7890000000000002e-06 -prometheus_evaluator_duration_seconds{quantile="0.05"} 1.7890000000000002e-06 -prometheus_evaluator_duration_seconds{quantile="0.5"} 1.7890000000000002e-06 -prometheus_evaluator_duration_seconds{quantile="0.9"} 1.7890000000000002e-06 -prometheus_evaluator_duration_seconds{quantile="0.99"} 1.7890000000000002e-06 -prometheus_evaluator_duration_seconds_sum 1.7890000000000002e-06 -prometheus_evaluator_duration_seconds_count 1 -prometheus_evaluator_iterations_skipped_total 0 -prometheus_notifications_dropped_total 0 -prometheus_notifications_queue_capacity 10000 -prometheus_notifications_queue_length 0 -prometheus_rule_evaluation_failures_total{rule_type="alerting"} 0 -prometheus_rule_evaluation_failures_total{rule_type="recording"} 0 -prometheus_sd_azure_refresh_duration_seconds{quantile="0.5"} NaN -prometheus_sd_azure_refresh_duration_seconds{quantile="0.9"} NaN -prometheus_sd_azure_refresh_duration_seconds{quantile="0.99"} NaN -prometheus_sd_azure_refresh_duration_seconds_sum 0 -prometheus_sd_azure_refresh_duration_seconds_count 0 -prometheus_sd_azure_refresh_failures_total 0 -prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.5"} NaN -prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.9"} NaN -prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.99"} NaN -prometheus_sd_consul_rpc_duration_seconds_sum{call="service",endpoint="catalog"} 0 -prometheus_sd_consul_rpc_duration_seconds_count{call="service",endpoint="catalog"} 0 -prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.5"} NaN -prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.9"} NaN -prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.99"} NaN -prometheus_sd_consul_rpc_duration_seconds_sum{call="services",endpoint="catalog"} 0 -prometheus_sd_consul_rpc_duration_seconds_count{call="services",endpoint="catalog"} 0 -prometheus_sd_consul_rpc_failures_total 0 -prometheus_sd_dns_lookup_failures_total 0 -prometheus_sd_dns_lookups_total 0 -prometheus_sd_ec2_refresh_duration_seconds{quantile="0.5"} NaN -prometheus_sd_ec2_refresh_duration_seconds{quantile="0.9"} NaN -prometheus_sd_ec2_refresh_duration_seconds{quantile="0.99"} NaN -prometheus_sd_ec2_refresh_duration_seconds_sum 0 -prometheus_sd_ec2_refresh_duration_seconds_count 0 -prometheus_sd_ec2_refresh_failures_total 0 -prometheus_sd_file_read_errors_total 0 -prometheus_sd_file_scan_duration_seconds{quantile="0.5"} NaN -prometheus_sd_file_scan_duration_seconds{quantile="0.9"} NaN -prometheus_sd_file_scan_duration_seconds{quantile="0.99"} NaN -prometheus_sd_file_scan_duration_seconds_sum 0 -prometheus_sd_file_scan_duration_seconds_count 0 -prometheus_sd_gce_refresh_duration{quantile="0.5"} NaN -prometheus_sd_gce_refresh_duration{quantile="0.9"} NaN -prometheus_sd_gce_refresh_duration{quantile="0.99"} NaN -prometheus_sd_gce_refresh_duration_sum 0 -prometheus_sd_gce_refresh_duration_count 0 -prometheus_sd_gce_refresh_failures_total 0 -prometheus_sd_kubernetes_events_total{event="add",role="endpoints"} 0 -prometheus_sd_kubernetes_events_total{event="add",role="node"} 0 
-prometheus_sd_kubernetes_events_total{event="add",role="pod"} 0 -prometheus_sd_kubernetes_events_total{event="add",role="service"} 0 -prometheus_sd_kubernetes_events_total{event="delete",role="endpoints"} 0 -prometheus_sd_kubernetes_events_total{event="delete",role="node"} 0 -prometheus_sd_kubernetes_events_total{event="delete",role="pod"} 0 -prometheus_sd_kubernetes_events_total{event="delete",role="service"} 0 -prometheus_sd_kubernetes_events_total{event="update",role="endpoints"} 0 -prometheus_sd_kubernetes_events_total{event="update",role="node"} 0 -prometheus_sd_kubernetes_events_total{event="update",role="pod"} 0 -prometheus_sd_kubernetes_events_total{event="update",role="service"} 0 -prometheus_sd_marathon_refresh_duration_seconds{quantile="0.5"} NaN -prometheus_sd_marathon_refresh_duration_seconds{quantile="0.9"} NaN -prometheus_sd_marathon_refresh_duration_seconds{quantile="0.99"} NaN -prometheus_sd_marathon_refresh_duration_seconds_sum 0 -prometheus_sd_marathon_refresh_duration_seconds_count 0 -prometheus_sd_marathon_refresh_failures_total 0 -prometheus_target_interval_length_seconds{interval="50ms",quantile="0.01"} 0.046182157 -prometheus_target_interval_length_seconds{interval="50ms",quantile="0.05"} 0.047306979000000006 -prometheus_target_interval_length_seconds{interval="50ms",quantile="0.5"} 0.050381782 -prometheus_target_interval_length_seconds{interval="50ms",quantile="0.9"} 0.052614556 -prometheus_target_interval_length_seconds{interval="50ms",quantile="0.99"} 0.054404386000000006 -prometheus_target_interval_length_seconds_sum{interval="50ms"} 34.512091221999995 -prometheus_target_interval_length_seconds_count{interval="50ms"} 685 -prometheus_target_scrape_pool_sync_total{scrape_job="prometheus"} 1 -prometheus_target_skipped_scrapes_total 0 -prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.01"} 0.00020043300000000002 -prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.05"} 0.00020043300000000002 -prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.5"} 0.00020043300000000002 -prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.9"} 0.00020043300000000002 -prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.99"} 0.00020043300000000002 -prometheus_target_sync_length_seconds_sum{scrape_job="prometheus"} 0.00020043300000000002 -prometheus_target_sync_length_seconds_count{scrape_job="prometheus"} 1 -prometheus_treecache_watcher_goroutines 0 -prometheus_treecache_zookeeper_failures_total 0 -# EOF diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/promtestdata.txt b/vendor/github.com/prometheus/prometheus/model/textparse/promtestdata.txt deleted file mode 100644 index 174f383e911..00000000000 --- a/vendor/github.com/prometheus/prometheus/model/textparse/promtestdata.txt +++ /dev/null @@ -1,529 +0,0 @@ -# HELP go_gc_duration_seconds A summary of the GC invocation durations. -# TYPE go_gc_duration_seconds summary -go_gc_duration_seconds{quantile="0"} 4.9351e-05 -go_gc_duration_seconds{quantile="0.25"} 7.424100000000001e-05 -go_gc_duration_seconds{quantile="0.5"} 8.3835e-05 -go_gc_duration_seconds{quantile="0.75"} 0.000106744 -go_gc_duration_seconds{quantile="1"} 0.002072195 -go_gc_duration_seconds_sum 0.012139815 -go_gc_duration_seconds_count 99 -# HELP go_goroutines Number of goroutines that currently exist. -# TYPE go_goroutines gauge -go_goroutines 33 -# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use. 
-# TYPE go_memstats_alloc_bytes gauge -go_memstats_alloc_bytes 1.7518624e+07 -# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed. -# TYPE go_memstats_alloc_bytes_total counter -go_memstats_alloc_bytes_total 8.3062296e+08 -# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. -# TYPE go_memstats_buck_hash_sys_bytes gauge -go_memstats_buck_hash_sys_bytes 1.494637e+06 -# HELP go_memstats_frees_total Total number of frees. -# TYPE go_memstats_frees_total counter -go_memstats_frees_total 4.65658e+06 -# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. -# TYPE go_memstats_gc_sys_bytes gauge -go_memstats_gc_sys_bytes 1.107968e+06 -# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use. -# TYPE go_memstats_heap_alloc_bytes gauge -go_memstats_heap_alloc_bytes 1.7518624e+07 -# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. -# TYPE go_memstats_heap_idle_bytes gauge -go_memstats_heap_idle_bytes 6.668288e+06 -# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. -# TYPE go_memstats_heap_inuse_bytes gauge -go_memstats_heap_inuse_bytes 1.8956288e+07 -# HELP go_memstats_heap_objects Number of allocated objects. -# TYPE go_memstats_heap_objects gauge -go_memstats_heap_objects 72755 -# HELP go_memstats_heap_released_bytes_total Total number of heap bytes released to OS. -# TYPE go_memstats_heap_released_bytes_total counter -go_memstats_heap_released_bytes_total 0 -# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. -# TYPE go_memstats_heap_sys_bytes gauge -go_memstats_heap_sys_bytes 2.5624576e+07 -# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection. -# TYPE go_memstats_last_gc_time_seconds gauge -go_memstats_last_gc_time_seconds 1.4843955586166437e+09 -# HELP go_memstats_lookups_total Total number of pointer lookups. -# TYPE go_memstats_lookups_total counter -go_memstats_lookups_total 2089 -# HELP go_memstats_mallocs_total Total number of mallocs. -# TYPE go_memstats_mallocs_total counter -go_memstats_mallocs_total 4.729335e+06 -# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. -# TYPE go_memstats_mcache_inuse_bytes gauge -go_memstats_mcache_inuse_bytes 9600 -# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. -# TYPE go_memstats_mcache_sys_bytes gauge -go_memstats_mcache_sys_bytes 16384 -# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. -# TYPE go_memstats_mspan_inuse_bytes gauge -go_memstats_mspan_inuse_bytes 211520 -# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. -# TYPE go_memstats_mspan_sys_bytes gauge -go_memstats_mspan_sys_bytes 245760 -# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. -# TYPE go_memstats_next_gc_bytes gauge -go_memstats_next_gc_bytes 2.033527e+07 -# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. -# TYPE go_memstats_other_sys_bytes gauge -go_memstats_other_sys_bytes 2.077323e+06 -# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator. -# TYPE go_memstats_stack_inuse_bytes gauge -go_memstats_stack_inuse_bytes 1.6384e+06 -# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. 
-# TYPE go_memstats_stack_sys_bytes gauge -go_memstats_stack_sys_bytes 1.6384e+06 -# HELP go_memstats_sys_bytes Number of bytes obtained by system. Sum of all system allocations. -# TYPE go_memstats_sys_bytes gauge -go_memstats_sys_bytes 3.2205048e+07 -# HELP http_request_duration_microseconds The HTTP request latencies in microseconds. -# TYPE http_request_duration_microseconds summary -http_request_duration_microseconds{handler="alerts",quantile="0.5"} NaN -http_request_duration_microseconds{handler="alerts",quantile="0.9"} NaN -http_request_duration_microseconds{handler="alerts",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="alerts"} 0 -http_request_duration_microseconds_count{handler="alerts"} 0 -http_request_duration_microseconds{handler="config",quantile="0.5"} NaN -http_request_duration_microseconds{handler="config",quantile="0.9"} NaN -http_request_duration_microseconds{handler="config",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="config"} 0 -http_request_duration_microseconds_count{handler="config"} 0 -http_request_duration_microseconds{handler="consoles",quantile="0.5"} NaN -http_request_duration_microseconds{handler="consoles",quantile="0.9"} NaN -http_request_duration_microseconds{handler="consoles",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="consoles"} 0 -http_request_duration_microseconds_count{handler="consoles"} 0 -http_request_duration_microseconds{handler="drop_series",quantile="0.5"} NaN -http_request_duration_microseconds{handler="drop_series",quantile="0.9"} NaN -http_request_duration_microseconds{handler="drop_series",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="drop_series"} 0 -http_request_duration_microseconds_count{handler="drop_series"} 0 -http_request_duration_microseconds{handler="federate",quantile="0.5"} NaN -http_request_duration_microseconds{handler="federate",quantile="0.9"} NaN -http_request_duration_microseconds{handler="federate",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="federate"} 0 -http_request_duration_microseconds_count{handler="federate"} 0 -http_request_duration_microseconds{handler="flags",quantile="0.5"} NaN -http_request_duration_microseconds{handler="flags",quantile="0.9"} NaN -http_request_duration_microseconds{handler="flags",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="flags"} 0 -http_request_duration_microseconds_count{handler="flags"} 0 -http_request_duration_microseconds{handler="graph",quantile="0.5"} 771.655 -http_request_duration_microseconds{handler="graph",quantile="0.9"} 1761.823 -http_request_duration_microseconds{handler="graph",quantile="0.99"} 1761.823 -http_request_duration_microseconds_sum{handler="graph"} 5803.93 -http_request_duration_microseconds_count{handler="graph"} 3 -http_request_duration_microseconds{handler="heap",quantile="0.5"} NaN -http_request_duration_microseconds{handler="heap",quantile="0.9"} NaN -http_request_duration_microseconds{handler="heap",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="heap"} 0 -http_request_duration_microseconds_count{handler="heap"} 0 -http_request_duration_microseconds{handler="label_values",quantile="0.5"} 325.401 -http_request_duration_microseconds{handler="label_values",quantile="0.9"} 414.708 -http_request_duration_microseconds{handler="label_values",quantile="0.99"} 414.708 -http_request_duration_microseconds_sum{handler="label_values"} 3995.574 
-http_request_duration_microseconds_count{handler="label_values"} 3 -http_request_duration_microseconds{handler="options",quantile="0.5"} NaN -http_request_duration_microseconds{handler="options",quantile="0.9"} NaN -http_request_duration_microseconds{handler="options",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="options"} 0 -http_request_duration_microseconds_count{handler="options"} 0 -http_request_duration_microseconds{handler="prometheus",quantile="0.5"} 1351.859 -http_request_duration_microseconds{handler="prometheus",quantile="0.9"} 1714.035 -http_request_duration_microseconds{handler="prometheus",quantile="0.99"} 2833.523 -http_request_duration_microseconds_sum{handler="prometheus"} 661851.54 -http_request_duration_microseconds_count{handler="prometheus"} 462 -http_request_duration_microseconds{handler="query",quantile="0.5"} 3885.448 -http_request_duration_microseconds{handler="query",quantile="0.9"} 4390.558 -http_request_duration_microseconds{handler="query",quantile="0.99"} 4390.558 -http_request_duration_microseconds_sum{handler="query"} 26074.11 -http_request_duration_microseconds_count{handler="query"} 6 -http_request_duration_microseconds{handler="query_range",quantile="0.5"} NaN -http_request_duration_microseconds{handler="query_range",quantile="0.9"} NaN -http_request_duration_microseconds{handler="query_range",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="query_range"} 0 -http_request_duration_microseconds_count{handler="query_range"} 0 -http_request_duration_microseconds{handler="rules",quantile="0.5"} NaN -http_request_duration_microseconds{handler="rules",quantile="0.9"} NaN -http_request_duration_microseconds{handler="rules",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="rules"} 0 -http_request_duration_microseconds_count{handler="rules"} 0 -http_request_duration_microseconds{handler="series",quantile="0.5"} NaN -http_request_duration_microseconds{handler="series",quantile="0.9"} NaN -http_request_duration_microseconds{handler="series",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="series"} 0 -http_request_duration_microseconds_count{handler="series"} 0 -http_request_duration_microseconds{handler="static",quantile="0.5"} 212.311 -http_request_duration_microseconds{handler="static",quantile="0.9"} 265.174 -http_request_duration_microseconds{handler="static",quantile="0.99"} 265.174 -http_request_duration_microseconds_sum{handler="static"} 6458.621 -http_request_duration_microseconds_count{handler="static"} 3 -http_request_duration_microseconds{handler="status",quantile="0.5"} NaN -http_request_duration_microseconds{handler="status",quantile="0.9"} NaN -http_request_duration_microseconds{handler="status",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="status"} 0 -http_request_duration_microseconds_count{handler="status"} 0 -http_request_duration_microseconds{handler="targets",quantile="0.5"} NaN -http_request_duration_microseconds{handler="targets",quantile="0.9"} NaN -http_request_duration_microseconds{handler="targets",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="targets"} 0 -http_request_duration_microseconds_count{handler="targets"} 0 -http_request_duration_microseconds{handler="version",quantile="0.5"} NaN -http_request_duration_microseconds{handler="version",quantile="0.9"} NaN -http_request_duration_microseconds{handler="version",quantile="0.99"} NaN -http_request_duration_microseconds_sum{handler="version"} 0 
-http_request_duration_microseconds_count{handler="version"} 0 -# HELP http_request_size_bytes The HTTP request sizes in bytes. -# TYPE http_request_size_bytes summary -http_request_size_bytes{handler="alerts",quantile="0.5"} NaN -http_request_size_bytes{handler="alerts",quantile="0.9"} NaN -http_request_size_bytes{handler="alerts",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="alerts"} 0 -http_request_size_bytes_count{handler="alerts"} 0 -http_request_size_bytes{handler="config",quantile="0.5"} NaN -http_request_size_bytes{handler="config",quantile="0.9"} NaN -http_request_size_bytes{handler="config",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="config"} 0 -http_request_size_bytes_count{handler="config"} 0 -http_request_size_bytes{handler="consoles",quantile="0.5"} NaN -http_request_size_bytes{handler="consoles",quantile="0.9"} NaN -http_request_size_bytes{handler="consoles",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="consoles"} 0 -http_request_size_bytes_count{handler="consoles"} 0 -http_request_size_bytes{handler="drop_series",quantile="0.5"} NaN -http_request_size_bytes{handler="drop_series",quantile="0.9"} NaN -http_request_size_bytes{handler="drop_series",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="drop_series"} 0 -http_request_size_bytes_count{handler="drop_series"} 0 -http_request_size_bytes{handler="federate",quantile="0.5"} NaN -http_request_size_bytes{handler="federate",quantile="0.9"} NaN -http_request_size_bytes{handler="federate",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="federate"} 0 -http_request_size_bytes_count{handler="federate"} 0 -http_request_size_bytes{handler="flags",quantile="0.5"} NaN -http_request_size_bytes{handler="flags",quantile="0.9"} NaN -http_request_size_bytes{handler="flags",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="flags"} 0 -http_request_size_bytes_count{handler="flags"} 0 -http_request_size_bytes{handler="graph",quantile="0.5"} 367 -http_request_size_bytes{handler="graph",quantile="0.9"} 389 -http_request_size_bytes{handler="graph",quantile="0.99"} 389 -http_request_size_bytes_sum{handler="graph"} 1145 -http_request_size_bytes_count{handler="graph"} 3 -http_request_size_bytes{handler="heap",quantile="0.5"} NaN -http_request_size_bytes{handler="heap",quantile="0.9"} NaN -http_request_size_bytes{handler="heap",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="heap"} 0 -http_request_size_bytes_count{handler="heap"} 0 -http_request_size_bytes{handler="label_values",quantile="0.5"} 416 -http_request_size_bytes{handler="label_values",quantile="0.9"} 416 -http_request_size_bytes{handler="label_values",quantile="0.99"} 416 -http_request_size_bytes_sum{handler="label_values"} 1248 -http_request_size_bytes_count{handler="label_values"} 3 -http_request_size_bytes{handler="options",quantile="0.5"} NaN -http_request_size_bytes{handler="options",quantile="0.9"} NaN -http_request_size_bytes{handler="options",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="options"} 0 -http_request_size_bytes_count{handler="options"} 0 -http_request_size_bytes{handler="prometheus",quantile="0.5"} 238 -http_request_size_bytes{handler="prometheus",quantile="0.9"} 238 -http_request_size_bytes{handler="prometheus",quantile="0.99"} 238 -http_request_size_bytes_sum{handler="prometheus"} 109956 -http_request_size_bytes_count{handler="prometheus"} 462 -http_request_size_bytes{handler="query",quantile="0.5"} 531 -http_request_size_bytes{handler="query",quantile="0.9"} 531 
-http_request_size_bytes{handler="query",quantile="0.99"} 531 -http_request_size_bytes_sum{handler="query"} 3186 -http_request_size_bytes_count{handler="query"} 6 -http_request_size_bytes{handler="query_range",quantile="0.5"} NaN -http_request_size_bytes{handler="query_range",quantile="0.9"} NaN -http_request_size_bytes{handler="query_range",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="query_range"} 0 -http_request_size_bytes_count{handler="query_range"} 0 -http_request_size_bytes{handler="rules",quantile="0.5"} NaN -http_request_size_bytes{handler="rules",quantile="0.9"} NaN -http_request_size_bytes{handler="rules",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="rules"} 0 -http_request_size_bytes_count{handler="rules"} 0 -http_request_size_bytes{handler="series",quantile="0.5"} NaN -http_request_size_bytes{handler="series",quantile="0.9"} NaN -http_request_size_bytes{handler="series",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="series"} 0 -http_request_size_bytes_count{handler="series"} 0 -http_request_size_bytes{handler="static",quantile="0.5"} 379 -http_request_size_bytes{handler="static",quantile="0.9"} 379 -http_request_size_bytes{handler="static",quantile="0.99"} 379 -http_request_size_bytes_sum{handler="static"} 1137 -http_request_size_bytes_count{handler="static"} 3 -http_request_size_bytes{handler="status",quantile="0.5"} NaN -http_request_size_bytes{handler="status",quantile="0.9"} NaN -http_request_size_bytes{handler="status",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="status"} 0 -http_request_size_bytes_count{handler="status"} 0 -http_request_size_bytes{handler="targets",quantile="0.5"} NaN -http_request_size_bytes{handler="targets",quantile="0.9"} NaN -http_request_size_bytes{handler="targets",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="targets"} 0 -http_request_size_bytes_count{handler="targets"} 0 -http_request_size_bytes{handler="version",quantile="0.5"} NaN -http_request_size_bytes{handler="version",quantile="0.9"} NaN -http_request_size_bytes{handler="version",quantile="0.99"} NaN -http_request_size_bytes_sum{handler="version"} 0 -http_request_size_bytes_count{handler="version"} 0 -# HELP http_requests_total Total number of HTTP requests made. -# TYPE http_requests_total counter -http_requests_total{code="200",handler="graph",method="get"} 3 -http_requests_total{code="200",handler="label_values",method="get"} 3 -http_requests_total{code="200",handler="prometheus",method="get"} 462 -http_requests_total{code="200",handler="query",method="get"} 6 -http_requests_total{code="200",handler="static",method="get"} 3 -# HELP http_response_size_bytes The HTTP response sizes in bytes. 
-# TYPE http_response_size_bytes summary -http_response_size_bytes{handler="alerts",quantile="0.5"} NaN -http_response_size_bytes{handler="alerts",quantile="0.9"} NaN -http_response_size_bytes{handler="alerts",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="alerts"} 0 -http_response_size_bytes_count{handler="alerts"} 0 -http_response_size_bytes{handler="config",quantile="0.5"} NaN -http_response_size_bytes{handler="config",quantile="0.9"} NaN -http_response_size_bytes{handler="config",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="config"} 0 -http_response_size_bytes_count{handler="config"} 0 -http_response_size_bytes{handler="consoles",quantile="0.5"} NaN -http_response_size_bytes{handler="consoles",quantile="0.9"} NaN -http_response_size_bytes{handler="consoles",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="consoles"} 0 -http_response_size_bytes_count{handler="consoles"} 0 -http_response_size_bytes{handler="drop_series",quantile="0.5"} NaN -http_response_size_bytes{handler="drop_series",quantile="0.9"} NaN -http_response_size_bytes{handler="drop_series",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="drop_series"} 0 -http_response_size_bytes_count{handler="drop_series"} 0 -http_response_size_bytes{handler="federate",quantile="0.5"} NaN -http_response_size_bytes{handler="federate",quantile="0.9"} NaN -http_response_size_bytes{handler="federate",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="federate"} 0 -http_response_size_bytes_count{handler="federate"} 0 -http_response_size_bytes{handler="flags",quantile="0.5"} NaN -http_response_size_bytes{handler="flags",quantile="0.9"} NaN -http_response_size_bytes{handler="flags",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="flags"} 0 -http_response_size_bytes_count{handler="flags"} 0 -http_response_size_bytes{handler="graph",quantile="0.5"} 3619 -http_response_size_bytes{handler="graph",quantile="0.9"} 3619 -http_response_size_bytes{handler="graph",quantile="0.99"} 3619 -http_response_size_bytes_sum{handler="graph"} 10857 -http_response_size_bytes_count{handler="graph"} 3 -http_response_size_bytes{handler="heap",quantile="0.5"} NaN -http_response_size_bytes{handler="heap",quantile="0.9"} NaN -http_response_size_bytes{handler="heap",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="heap"} 0 -http_response_size_bytes_count{handler="heap"} 0 -http_response_size_bytes{handler="label_values",quantile="0.5"} 642 -http_response_size_bytes{handler="label_values",quantile="0.9"} 642 -http_response_size_bytes{handler="label_values",quantile="0.99"} 642 -http_response_size_bytes_sum{handler="label_values"} 1926 -http_response_size_bytes_count{handler="label_values"} 3 -http_response_size_bytes{handler="options",quantile="0.5"} NaN -http_response_size_bytes{handler="options",quantile="0.9"} NaN -http_response_size_bytes{handler="options",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="options"} 0 -http_response_size_bytes_count{handler="options"} 0 -http_response_size_bytes{handler="prometheus",quantile="0.5"} 3033 -http_response_size_bytes{handler="prometheus",quantile="0.9"} 3123 -http_response_size_bytes{handler="prometheus",quantile="0.99"} 3128 -http_response_size_bytes_sum{handler="prometheus"} 1.374097e+06 -http_response_size_bytes_count{handler="prometheus"} 462 -http_response_size_bytes{handler="query",quantile="0.5"} 776 -http_response_size_bytes{handler="query",quantile="0.9"} 781 -http_response_size_bytes{handler="query",quantile="0.99"} 781 
-http_response_size_bytes_sum{handler="query"} 4656 -http_response_size_bytes_count{handler="query"} 6 -http_response_size_bytes{handler="query_range",quantile="0.5"} NaN -http_response_size_bytes{handler="query_range",quantile="0.9"} NaN -http_response_size_bytes{handler="query_range",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="query_range"} 0 -http_response_size_bytes_count{handler="query_range"} 0 -http_response_size_bytes{handler="rules",quantile="0.5"} NaN -http_response_size_bytes{handler="rules",quantile="0.9"} NaN -http_response_size_bytes{handler="rules",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="rules"} 0 -http_response_size_bytes_count{handler="rules"} 0 -http_response_size_bytes{handler="series",quantile="0.5"} NaN -http_response_size_bytes{handler="series",quantile="0.9"} NaN -http_response_size_bytes{handler="series",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="series"} 0 -http_response_size_bytes_count{handler="series"} 0 -http_response_size_bytes{handler="static",quantile="0.5"} 6316 -http_response_size_bytes{handler="static",quantile="0.9"} 6316 -http_response_size_bytes{handler="static",quantile="0.99"} 6316 -http_response_size_bytes_sum{handler="static"} 18948 -http_response_size_bytes_count{handler="static"} 3 -http_response_size_bytes{handler="status",quantile="0.5"} NaN -http_response_size_bytes{handler="status",quantile="0.9"} NaN -http_response_size_bytes{handler="status",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="status"} 0 -http_response_size_bytes_count{handler="status"} 0 -http_response_size_bytes{handler="targets",quantile="0.5"} NaN -http_response_size_bytes{handler="targets",quantile="0.9"} NaN -http_response_size_bytes{handler="targets",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="targets"} 0 -http_response_size_bytes_count{handler="targets"} 0 -http_response_size_bytes{handler="version",quantile="0.5"} NaN -http_response_size_bytes{handler="version",quantile="0.9"} NaN -http_response_size_bytes{handler="version",quantile="0.99"} NaN -http_response_size_bytes_sum{handler="version"} 0 -http_response_size_bytes_count{handler="version"} 0 -# HELP prometheus_build_info A metric with a constant '1' value labeled by version, revision, branch, and goversion from which prometheus was built. -# TYPE prometheus_build_info gauge -prometheus_build_info{branch="",goversion="go1.7.3",revision="",version=""} 1 -# HELP prometheus_config_last_reload_success_timestamp_seconds Timestamp of the last successful configuration reload. -# TYPE prometheus_config_last_reload_success_timestamp_seconds gauge -prometheus_config_last_reload_success_timestamp_seconds 1.484395547e+09 -# HELP prometheus_config_last_reload_successful Whether the last configuration reload attempt was successful. -# TYPE prometheus_config_last_reload_successful gauge -prometheus_config_last_reload_successful 1 -# HELP prometheus_evaluator_duration_seconds The duration of rule group evaluations. 
-# TYPE prometheus_evaluator_duration_seconds summary -prometheus_evaluator_duration_seconds{quantile="0.01"} 1.7890000000000002e-06 -prometheus_evaluator_duration_seconds{quantile="0.05"} 1.7890000000000002e-06 -prometheus_evaluator_duration_seconds{quantile="0.5"} 1.7890000000000002e-06 -prometheus_evaluator_duration_seconds{quantile="0.9"} 1.7890000000000002e-06 -prometheus_evaluator_duration_seconds{quantile="0.99"} 1.7890000000000002e-06 -prometheus_evaluator_duration_seconds_sum 1.7890000000000002e-06 -prometheus_evaluator_duration_seconds_count 1 -# HELP prometheus_evaluator_iterations_skipped_total The total number of rule group evaluations skipped due to throttled metric storage. -# TYPE prometheus_evaluator_iterations_skipped_total counter -prometheus_evaluator_iterations_skipped_total 0 -# HELP prometheus_notifications_dropped_total Total number of alerts dropped due to alert manager missing in configuration. -# TYPE prometheus_notifications_dropped_total counter -prometheus_notifications_dropped_total 0 -# HELP prometheus_notifications_queue_capacity The capacity of the alert notifications queue. -# TYPE prometheus_notifications_queue_capacity gauge -prometheus_notifications_queue_capacity 10000 -# HELP prometheus_notifications_queue_length The number of alert notifications in the queue. -# TYPE prometheus_notifications_queue_length gauge -prometheus_notifications_queue_length 0 -# HELP prometheus_rule_evaluation_failures_total The total number of rule evaluation failures. -# TYPE prometheus_rule_evaluation_failures_total counter -prometheus_rule_evaluation_failures_total{rule_type="alerting"} 0 -prometheus_rule_evaluation_failures_total{rule_type="recording"} 0 -# HELP prometheus_sd_azure_refresh_duration_seconds The duration of a Azure-SD refresh in seconds. -# TYPE prometheus_sd_azure_refresh_duration_seconds summary -prometheus_sd_azure_refresh_duration_seconds{quantile="0.5"} NaN -prometheus_sd_azure_refresh_duration_seconds{quantile="0.9"} NaN -prometheus_sd_azure_refresh_duration_seconds{quantile="0.99"} NaN -prometheus_sd_azure_refresh_duration_seconds_sum 0 -prometheus_sd_azure_refresh_duration_seconds_count 0 -# HELP prometheus_sd_azure_refresh_failures_total Number of Azure-SD refresh failures. -# TYPE prometheus_sd_azure_refresh_failures_total counter -prometheus_sd_azure_refresh_failures_total 0 -# HELP prometheus_sd_consul_rpc_duration_seconds The duration of a Consul RPC call in seconds. -# TYPE prometheus_sd_consul_rpc_duration_seconds summary -prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.5"} NaN -prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.9"} NaN -prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.99"} NaN -prometheus_sd_consul_rpc_duration_seconds_sum{call="service",endpoint="catalog"} 0 -prometheus_sd_consul_rpc_duration_seconds_count{call="service",endpoint="catalog"} 0 -prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.5"} NaN -prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.9"} NaN -prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.99"} NaN -prometheus_sd_consul_rpc_duration_seconds_sum{call="services",endpoint="catalog"} 0 -prometheus_sd_consul_rpc_duration_seconds_count{call="services",endpoint="catalog"} 0 -# HELP prometheus_sd_consul_rpc_failures_total The number of Consul RPC call failures. 
-# TYPE prometheus_sd_consul_rpc_failures_total counter -prometheus_sd_consul_rpc_failures_total 0 -# HELP prometheus_sd_dns_lookup_failures_total The number of DNS-SD lookup failures. -# TYPE prometheus_sd_dns_lookup_failures_total counter -prometheus_sd_dns_lookup_failures_total 0 -# HELP prometheus_sd_dns_lookups_total The number of DNS-SD lookups. -# TYPE prometheus_sd_dns_lookups_total counter -prometheus_sd_dns_lookups_total 0 -# HELP prometheus_sd_ec2_refresh_duration_seconds The duration of a EC2-SD refresh in seconds. -# TYPE prometheus_sd_ec2_refresh_duration_seconds summary -prometheus_sd_ec2_refresh_duration_seconds{quantile="0.5"} NaN -prometheus_sd_ec2_refresh_duration_seconds{quantile="0.9"} NaN -prometheus_sd_ec2_refresh_duration_seconds{quantile="0.99"} NaN -prometheus_sd_ec2_refresh_duration_seconds_sum 0 -prometheus_sd_ec2_refresh_duration_seconds_count 0 -# HELP prometheus_sd_ec2_refresh_failures_total The number of EC2-SD scrape failures. -# TYPE prometheus_sd_ec2_refresh_failures_total counter -prometheus_sd_ec2_refresh_failures_total 0 -# HELP prometheus_sd_file_read_errors_total The number of File-SD read errors. -# TYPE prometheus_sd_file_read_errors_total counter -prometheus_sd_file_read_errors_total 0 -# HELP prometheus_sd_file_scan_duration_seconds The duration of the File-SD scan in seconds. -# TYPE prometheus_sd_file_scan_duration_seconds summary -prometheus_sd_file_scan_duration_seconds{quantile="0.5"} NaN -prometheus_sd_file_scan_duration_seconds{quantile="0.9"} NaN -prometheus_sd_file_scan_duration_seconds{quantile="0.99"} NaN -prometheus_sd_file_scan_duration_seconds_sum 0 -prometheus_sd_file_scan_duration_seconds_count 0 -# HELP prometheus_sd_gce_refresh_duration The duration of a GCE-SD refresh in seconds. -# TYPE prometheus_sd_gce_refresh_duration summary -prometheus_sd_gce_refresh_duration{quantile="0.5"} NaN -prometheus_sd_gce_refresh_duration{quantile="0.9"} NaN -prometheus_sd_gce_refresh_duration{quantile="0.99"} NaN -prometheus_sd_gce_refresh_duration_sum 0 -prometheus_sd_gce_refresh_duration_count 0 -# HELP prometheus_sd_gce_refresh_failures_total The number of GCE-SD refresh failures. -# TYPE prometheus_sd_gce_refresh_failures_total counter -prometheus_sd_gce_refresh_failures_total 0 -# HELP prometheus_sd_kubernetes_events_total The number of Kubernetes events handled. -# TYPE prometheus_sd_kubernetes_events_total counter -prometheus_sd_kubernetes_events_total{event="add",role="endpoints"} 0 -prometheus_sd_kubernetes_events_total{event="add",role="node"} 0 -prometheus_sd_kubernetes_events_total{event="add",role="pod"} 0 -prometheus_sd_kubernetes_events_total{event="add",role="service"} 0 -prometheus_sd_kubernetes_events_total{event="delete",role="endpoints"} 0 -prometheus_sd_kubernetes_events_total{event="delete",role="node"} 0 -prometheus_sd_kubernetes_events_total{event="delete",role="pod"} 0 -prometheus_sd_kubernetes_events_total{event="delete",role="service"} 0 -prometheus_sd_kubernetes_events_total{event="update",role="endpoints"} 0 -prometheus_sd_kubernetes_events_total{event="update",role="node"} 0 -prometheus_sd_kubernetes_events_total{event="update",role="pod"} 0 -prometheus_sd_kubernetes_events_total{event="update",role="service"} 0 -# HELP prometheus_sd_marathon_refresh_duration_seconds The duration of a Marathon-SD refresh in seconds. 
-# TYPE prometheus_sd_marathon_refresh_duration_seconds summary -prometheus_sd_marathon_refresh_duration_seconds{quantile="0.5"} NaN -prometheus_sd_marathon_refresh_duration_seconds{quantile="0.9"} NaN -prometheus_sd_marathon_refresh_duration_seconds{quantile="0.99"} NaN -prometheus_sd_marathon_refresh_duration_seconds_sum 0 -prometheus_sd_marathon_refresh_duration_seconds_count 0 -# HELP prometheus_sd_marathon_refresh_failures_total The number of Marathon-SD refresh failures. -# TYPE prometheus_sd_marathon_refresh_failures_total counter -prometheus_sd_marathon_refresh_failures_total 0 -# HELP prometheus_target_interval_length_seconds Actual intervals between scrapes. -# TYPE prometheus_target_interval_length_seconds summary -prometheus_target_interval_length_seconds{interval="50ms",quantile="0.01"} 0.046182157 -prometheus_target_interval_length_seconds{interval="50ms",quantile="0.05"} 0.047306979000000006 -prometheus_target_interval_length_seconds{interval="50ms",quantile="0.5"} 0.050381782 -prometheus_target_interval_length_seconds{interval="50ms",quantile="0.9"} 0.052614556 -prometheus_target_interval_length_seconds{interval="50ms",quantile="0.99"} 0.054404386000000006 -prometheus_target_interval_length_seconds_sum{interval="50ms"} 34.512091221999995 -prometheus_target_interval_length_seconds_count{interval="50ms"} 685 -# HELP prometheus_target_scrape_pool_sync_total Total number of syncs that were executed on a scrape pool. -# TYPE prometheus_target_scrape_pool_sync_total counter -prometheus_target_scrape_pool_sync_total{scrape_job="prometheus"} 1 -# HELP prometheus_target_skipped_scrapes_total Total number of scrapes that were skipped because the metric storage was throttled. -# TYPE prometheus_target_skipped_scrapes_total counter -prometheus_target_skipped_scrapes_total 0 -# HELP prometheus_target_sync_length_seconds Actual interval to sync the scrape pool. -# TYPE prometheus_target_sync_length_seconds summary -prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.01"} 0.00020043300000000002 -prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.05"} 0.00020043300000000002 -prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.5"} 0.00020043300000000002 -prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.9"} 0.00020043300000000002 -prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.99"} 0.00020043300000000002 -prometheus_target_sync_length_seconds_sum{scrape_job="prometheus"} 0.00020043300000000002 -prometheus_target_sync_length_seconds_count{scrape_job="prometheus"} 1 -# HELP prometheus_treecache_watcher_goroutines The current number of watcher goroutines. -# TYPE prometheus_treecache_watcher_goroutines gauge -prometheus_treecache_watcher_goroutines 0 -# HELP prometheus_treecache_zookeeper_failures_total The total number of ZooKeeper failures. 
-# TYPE prometheus_treecache_zookeeper_failures_total counter -prometheus_treecache_zookeeper_failures_total 0 -# EOF diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go b/vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go index e384a75fca4..a77e1d728f3 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go +++ b/vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go @@ -20,7 +20,9 @@ import ( "fmt" "io" "math" + "strconv" "strings" + "sync" "unicode/utf8" "github.com/gogo/protobuf/proto" @@ -34,6 +36,15 @@ import ( dto "github.com/prometheus/prometheus/prompb/io/prometheus/client" ) +// floatFormatBufPool is exclusively used in formatOpenMetricsFloat. +var floatFormatBufPool = sync.Pool{ + New: func() interface{} { + // To contain at most 17 digits and additional syntax for a float64. + b := make([]byte, 0, 24) + return &b + }, +} + // ProtobufParser is a very inefficient way of unmarshaling the old Prometheus // protobuf format and then present it as it if were parsed by a // Prometheus-2-style text parser. This is only done so that we can easily plug @@ -457,6 +468,12 @@ func (p *ProtobufParser) Next() (Entry, error) { p.state = EntryHelp case EntryHelp: + if p.mf.Unit != "" { + p.state = EntryUnit + } else { + p.state = EntryType + } + case EntryUnit: p.state = EntryType case EntryType: t := p.mf.GetType() @@ -604,7 +621,7 @@ func readDelimited(b []byte, mf *dto.MetricFamily) (n int, err error) { return totalLength, mf.Unmarshal(b[varIntLength:totalLength]) } -// formatOpenMetricsFloat works like the usual Go string formatting of a fleat +// formatOpenMetricsFloat works like the usual Go string formatting of a float // but appends ".0" if the resulting number would otherwise contain neither a // "." nor an "e". 
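floatFormatBufPool above stores *[]byte values (a pointer, so Put does not force a fresh allocation for the slice header) pre-sized to 24 bytes, enough for a float64 with up to 17 significant digits plus sign, exponent, and a trailing ".0". A standalone sketch of how the pooled buffer is used, mirroring the formatOpenMetricsFloat rewrite in the following hunk (the NaN and ±Inf special cases kept by the real function are omitted here):

package main

import (
	"bytes"
	"fmt"
	"strconv"
	"sync"
)

var bufPool = sync.Pool{
	New: func() interface{} {
		b := make([]byte, 0, 24) // enough for any float64 plus a trailing ".0"
		return &b
	},
}

// formatFloat renders f in the shortest 'g' form and appends ".0" when the
// result contains neither a '.' nor an 'e', as OpenMetrics expects.
func formatFloat(f float64) string {
	bp := bufPool.Get().(*[]byte)
	defer bufPool.Put(bp)
	*bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64)
	if !bytes.ContainsAny(*bp, "e.") {
		*bp = append(*bp, '.', '0')
	}
	return string(*bp)
}

func main() {
	fmt.Println(formatFloat(1))    // 1.0
	fmt.Println(formatFloat(0.25)) // 0.25
	fmt.Println(formatFloat(1e21)) // 1e+21
}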
func formatOpenMetricsFloat(f float64) string { @@ -623,11 +640,15 @@ func formatOpenMetricsFloat(f float64) string { case math.IsInf(f, -1): return "-Inf" } - s := fmt.Sprint(f) - if strings.ContainsAny(s, "e.") { - return s + bp := floatFormatBufPool.Get().(*[]byte) + defer floatFormatBufPool.Put(bp) + + *bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64) + if bytes.ContainsAny(*bp, "e.") { + return string(*bp) } - return s + ".0" + *bp = append(*bp, '.', '0') + return string(*bp) } // isNativeHistogram returns false iff the provided histograms has no spans at diff --git a/vendor/github.com/prometheus/prometheus/notifier/notifier.go b/vendor/github.com/prometheus/prometheus/notifier/notifier.go index 218e4cb8c74..e970b67e6d2 100644 --- a/vendor/github.com/prometheus/prometheus/notifier/notifier.go +++ b/vendor/github.com/prometheus/prometheus/notifier/notifier.go @@ -16,25 +16,28 @@ package notifier import ( "bytes" "context" + "crypto/md5" + "encoding/hex" "encoding/json" "fmt" "io" + "log/slog" "net/http" "net/url" "path" "sync" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/go-openapi/strfmt" "github.com/prometheus/alertmanager/api/v2/models" "github.com/prometheus/client_golang/prometheus" config_util "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/common/sigv4" "github.com/prometheus/common/version" "go.uber.org/atomic" + "gopkg.in/yaml.v2" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery/targetgroup" @@ -117,7 +120,7 @@ type Manager struct { stopRequested chan struct{} alertmanagers map[string]*alertmanagerSet - logger log.Logger + logger *slog.Logger } // Options are the configurable parameters of a Handler. @@ -218,12 +221,12 @@ func do(ctx context.Context, client *http.Client, req *http.Request) (*http.Resp } // NewManager is the manager constructor. -func NewManager(o *Options, logger log.Logger) *Manager { +func NewManager(o *Options, logger *slog.Logger) *Manager { if o.Do == nil { o.Do = do } if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } n := &Manager{ @@ -257,6 +260,16 @@ func (n *Manager) ApplyConfig(conf *config.Config) error { n.opts.RelabelConfigs = conf.AlertingConfig.AlertRelabelConfigs amSets := make(map[string]*alertmanagerSet) + // configToAlertmanagers maps alertmanager sets for each unique AlertmanagerConfig, + // helping to avoid dropping known alertmanagers and re-use them without waiting for SD updates when applying the config. 
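The notifier changes above migrate Manager from go-kit/log to the standard library's log/slog: the struct field, the NewManager signature, and the nil default (promslog.NewNopLogger()) all change together, so any caller constructing a Manager now has to pass a *slog.Logger. A hypothetical caller, assuming an already-populated notifier.Options value:

package example

import (
	"log/slog"
	"os"

	"github.com/prometheus/common/promslog"
	"github.com/prometheus/prometheus/notifier"
)

// newManagers shows the new constructor signature; opts is assumed to be a
// fully populated notifier.Options and is not part of this sketch.
func newManagers(opts *notifier.Options) (quiet, verbose *notifier.Manager) {
	quiet = notifier.NewManager(opts, promslog.NewNopLogger()) // discard all notifier logs
	verbose = notifier.NewManager(opts, slog.New(slog.NewJSONHandler(os.Stderr, nil)))
	return quiet, verbose
}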
+ configToAlertmanagers := make(map[string]*alertmanagerSet, len(n.alertmanagers)) + for _, oldAmSet := range n.alertmanagers { + hash, err := oldAmSet.configHash() + if err != nil { + return err + } + configToAlertmanagers[hash] = oldAmSet + } for k, cfg := range conf.AlertingConfig.AlertmanagerConfigs.ToMap() { ams, err := newAlertmanagerSet(cfg, n.logger, n.metrics) @@ -264,6 +277,16 @@ func (n *Manager) ApplyConfig(conf *config.Config) error { return err } + hash, err := ams.configHash() + if err != nil { + return err + } + + if oldAmSet, ok := configToAlertmanagers[hash]; ok { + ams.ams = oldAmSet.ams + ams.droppedAms = oldAmSet.droppedAms + } + amSets[k] = ams } @@ -319,7 +342,7 @@ func (n *Manager) Run(tsets <-chan map[string][]*targetgroup.Group) { }() wg.Wait() - level.Info(n.logger).Log("msg", "Notification manager stopped") + n.logger.Info("Notification manager stopped") } // sendLoop continuously consumes the notifications queue and sends alerts to @@ -376,20 +399,20 @@ func (n *Manager) sendOneBatch() { func (n *Manager) drainQueue() { if !n.opts.DrainOnShutdown { if n.queueLen() > 0 { - level.Warn(n.logger).Log("msg", "Draining remaining notifications on shutdown is disabled, and some notifications have been dropped", "count", n.queueLen()) + n.logger.Warn("Draining remaining notifications on shutdown is disabled, and some notifications have been dropped", "count", n.queueLen()) n.metrics.dropped.Add(float64(n.queueLen())) } return } - level.Info(n.logger).Log("msg", "Draining any remaining notifications...") + n.logger.Info("Draining any remaining notifications...") for n.queueLen() > 0 { n.sendOneBatch() } - level.Info(n.logger).Log("msg", "Remaining notifications drained") + n.logger.Info("Remaining notifications drained") } func (n *Manager) reload(tgs map[string][]*targetgroup.Group) { @@ -399,7 +422,7 @@ func (n *Manager) reload(tgs map[string][]*targetgroup.Group) { for id, tgroup := range tgs { am, ok := n.alertmanagers[id] if !ok { - level.Error(n.logger).Log("msg", "couldn't sync alert manager set", "err", fmt.Sprintf("invalid id:%v", id)) + n.logger.Error("couldn't sync alert manager set", "err", fmt.Sprintf("invalid id:%v", id)) continue } am.sync(tgroup) @@ -422,7 +445,7 @@ func (n *Manager) Send(alerts ...*Alert) { if d := len(alerts) - n.opts.QueueCapacity; d > 0 { alerts = alerts[d:] - level.Warn(n.logger).Log("msg", "Alert batch larger than queue capacity, dropping alerts", "num_dropped", d) + n.logger.Warn("Alert batch larger than queue capacity, dropping alerts", "num_dropped", d) n.metrics.dropped.Add(float64(d)) } @@ -431,7 +454,7 @@ func (n *Manager) Send(alerts ...*Alert) { if d := (len(n.queue) + len(alerts)) - n.opts.QueueCapacity; d > 0 { n.queue = n.queue[d:] - level.Warn(n.logger).Log("msg", "Alert notification queue full, dropping alerts", "num_dropped", d) + n.logger.Warn("Alert notification queue full, dropping alerts", "num_dropped", d) n.metrics.dropped.Add(float64(d)) } n.queue = append(n.queue, alerts...) @@ -519,10 +542,10 @@ func (n *Manager) sendAll(alerts ...*Alert) bool { begin := time.Now() - // v1Payload and v2Payload represent 'alerts' marshaled for Alertmanager API - // v1 or v2. Marshaling happens below. Reference here is for caching between + // cachedPayload represent 'alerts' marshaled for Alertmanager API v2. + // Marshaling happens below. Reference here is for caching between // for loop iterations. 
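The ApplyConfig change above keys previously discovered Alertmanagers by a hash of their configuration so they survive a reload without waiting for service discovery to repopulate them. A minimal sketch of that hashing idea follows; amConfig is a hypothetical simplified stand-in for the real AlertmanagerConfig, and only the principle matters: equal configurations marshal to equal YAML and therefore equal hashes.

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"

	"gopkg.in/yaml.v2"
)

// amConfig is a stand-in for the real AlertmanagerConfig.
type amConfig struct {
	APIVersion string `yaml:"api_version"`
	PathPrefix string `yaml:"path_prefix"`
	Timeout    string `yaml:"timeout"`
}

// configHash returns a stable fingerprint of cfg, so alertmanager sets built
// from equal configs can be matched across reloads.
func configHash(cfg amConfig) (string, error) {
	b, err := yaml.Marshal(cfg)
	if err != nil {
		return "", err
	}
	sum := md5.Sum(b)
	return hex.EncodeToString(sum[:]), nil
}

func main() {
	a := amConfig{APIVersion: "v2", PathPrefix: "/", Timeout: "10s"}
	b := a // same settings, separate value
	ha, _ := configHash(a)
	hb, _ := configHash(b)
	fmt.Println(ha == hb) // true: equal configs hash to the same key
}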
- var v1Payload, v2Payload []byte + var cachedPayload []byte n.mtx.RLock() amSets := n.alertmanagers @@ -553,42 +576,29 @@ func (n *Manager) sendAll(alerts ...*Alert) bool { continue } // We can't use the cached values from previous iteration. - v1Payload, v2Payload = nil, nil + cachedPayload = nil } switch ams.cfg.APIVersion { - case config.AlertmanagerAPIVersionV1: - { - if v1Payload == nil { - v1Payload, err = json.Marshal(amAlerts) - if err != nil { - level.Error(n.logger).Log("msg", "Encoding alerts for Alertmanager API v1 failed", "err", err) - ams.mtx.RUnlock() - return false - } - } - - payload = v1Payload - } case config.AlertmanagerAPIVersionV2: { - if v2Payload == nil { + if cachedPayload == nil { openAPIAlerts := alertsToOpenAPIAlerts(amAlerts) - v2Payload, err = json.Marshal(openAPIAlerts) + cachedPayload, err = json.Marshal(openAPIAlerts) if err != nil { - level.Error(n.logger).Log("msg", "Encoding alerts for Alertmanager API v2 failed", "err", err) + n.logger.Error("Encoding alerts for Alertmanager API v2 failed", "err", err) ams.mtx.RUnlock() return false } } - payload = v2Payload + payload = cachedPayload } default: { - level.Error(n.logger).Log( - "msg", fmt.Sprintf("Invalid Alertmanager API version '%v', expected one of '%v'", ams.cfg.APIVersion, config.SupportedAlertmanagerAPIVersions), + n.logger.Error( + fmt.Sprintf("Invalid Alertmanager API version '%v', expected one of '%v'", ams.cfg.APIVersion, config.SupportedAlertmanagerAPIVersions), "err", err, ) ams.mtx.RUnlock() @@ -598,7 +608,7 @@ func (n *Manager) sendAll(alerts ...*Alert) bool { if len(ams.cfg.AlertRelabelConfigs) > 0 { // We can't use the cached values on the next iteration. - v1Payload, v2Payload = nil, nil + cachedPayload = nil } for _, am := range ams.ams { @@ -609,7 +619,7 @@ func (n *Manager) sendAll(alerts ...*Alert) bool { go func(ctx context.Context, client *http.Client, url string, payload []byte, count int) { if err := n.sendOne(ctx, client, url, payload); err != nil { - level.Error(n.logger).Log("alertmanager", url, "count", count, "msg", "Error sending alert", "err", err) + n.logger.Error("Error sending alert", "alertmanager", url, "count", count, "err", err) n.metrics.errors.WithLabelValues(url).Inc() } else { numSuccess.Inc() @@ -689,7 +699,7 @@ func (n *Manager) sendOne(ctx context.Context, c *http.Client, url string, b []b // // Stop is safe to call multiple times. func (n *Manager) Stop() { - level.Info(n.logger).Log("msg", "Stopping notification manager...") + n.logger.Info("Stopping notification manager...") n.stopOnce.Do(func() { close(n.stopRequested) @@ -724,10 +734,10 @@ type alertmanagerSet struct { mtx sync.RWMutex ams []alertmanager droppedAms []alertmanager - logger log.Logger + logger *slog.Logger } -func newAlertmanagerSet(cfg *config.AlertmanagerConfig, logger log.Logger, metrics *alertMetrics) (*alertmanagerSet, error) { +func newAlertmanagerSet(cfg *config.AlertmanagerConfig, logger *slog.Logger, metrics *alertMetrics) (*alertmanagerSet, error) { client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, "alertmanager") if err != nil { return nil, err @@ -761,7 +771,7 @@ func (s *alertmanagerSet) sync(tgs []*targetgroup.Group) { for _, tg := range tgs { ams, droppedAms, err := AlertmanagerFromGroup(tg, s.cfg) if err != nil { - level.Error(s.logger).Log("msg", "Creating discovered Alertmanagers failed", "err", err) + s.logger.Error("Creating discovered Alertmanagers failed", "err", err) continue } allAms = append(allAms, ams...) 
@@ -770,6 +780,7 @@ func (s *alertmanagerSet) sync(tgs []*targetgroup.Group) { s.mtx.Lock() defer s.mtx.Unlock() + previousAms := s.ams // Set new Alertmanagers and deduplicate them along their unique URL. s.ams = []alertmanager{} s.droppedAms = []alertmanager{} @@ -789,6 +800,26 @@ func (s *alertmanagerSet) sync(tgs []*targetgroup.Group) { seen[us] = struct{}{} s.ams = append(s.ams, am) } + // Now remove counters for any removed Alertmanagers. + for _, am := range previousAms { + us := am.url().String() + if _, ok := seen[us]; ok { + continue + } + s.metrics.latency.DeleteLabelValues(us) + s.metrics.sent.DeleteLabelValues(us) + s.metrics.errors.DeleteLabelValues(us) + seen[us] = struct{}{} + } +} + +func (s *alertmanagerSet) configHash() (string, error) { + b, err := yaml.Marshal(s.cfg) + if err != nil { + return "", err + } + hash := md5.Sum(b) + return hex.EncodeToString(hash[:]), nil } func postPath(pre string, v config.AlertmanagerAPIVersion) string { diff --git a/vendor/github.com/prometheus/prometheus/promql/engine.go b/vendor/github.com/prometheus/prometheus/promql/engine.go index 9454f5b5dce..55ac4be913c 100644 --- a/vendor/github.com/prometheus/prometheus/promql/engine.go +++ b/vendor/github.com/prometheus/prometheus/promql/engine.go @@ -20,6 +20,7 @@ import ( "errors" "fmt" "io" + "log/slog" "math" "reflect" "runtime" @@ -30,10 +31,9 @@ import ( "sync" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/trace" @@ -43,6 +43,7 @@ import ( "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/promql/parser" + "github.com/prometheus/prometheus/promql/parser/posrange" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/prometheus/prometheus/util/annotations" @@ -125,7 +126,11 @@ type QueryEngine interface { // QueryLogger is an interface that can be used to log all the queries logged // by the engine. type QueryLogger interface { - Log(...interface{}) error + Error(msg string, args ...any) + Info(msg string, args ...any) + Debug(msg string, args ...any) + Warn(msg string, args ...any) + With(args ...any) Close() error } @@ -288,7 +293,7 @@ type QueryTracker interface { // EngineOpts contains configuration options used when creating a new Engine. type EngineOpts struct { - Logger log.Logger + Logger *slog.Logger Reg prometheus.Registerer MaxSamples int Timeout time.Duration @@ -326,7 +331,7 @@ type EngineOpts struct { // Engine handles the lifetime of queries from beginning to end. // It is connected to a querier. type Engine struct { - logger log.Logger + logger *slog.Logger metrics *engineMetrics timeout time.Duration maxSamplesPerQuery int @@ -344,7 +349,7 @@ type Engine struct { // NewEngine returns a new engine. 
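The notifier and engine hunks replace go-kit's level.X(logger).Log("msg", ...) calls with *slog.Logger methods that take a message plus key/value attributes. A small standalone sketch of that calling convention, using only the standard library (the vendored code obtains its logger from prometheus/common/promslog, which returns a configured *slog.Logger):

package main

import (
	"log/slog"
	"os"
)

func main() {
	// A text handler on stderr; the vendored code builds its logger elsewhere,
	// but the call sites look the same.
	logger := slog.New(slog.NewTextHandler(os.Stderr, nil))

	// Old style (go-kit): level.Warn(l).Log("msg", "queue full", "num_dropped", 3)
	// New style (slog):   the message is positional, attributes follow as pairs.
	logger.Warn("Alert notification queue full, dropping alerts", "num_dropped", 3)

	// With returns a child logger carrying the attributes on every record,
	// which is how the query logger can accumulate params, stats and span IDs.
	qlog := logger.With("params", map[string]any{"query": "up", "step": 60})
	qlog.Info("promql query logged")
}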
func NewEngine(opts EngineOpts) *Engine { if opts.Logger == nil { - opts.Logger = log.NewNopLogger() + opts.Logger = promslog.NewNopLogger() } queryResultSummary := prometheus.NewSummaryVec(prometheus.SummaryOpts{ @@ -403,7 +408,7 @@ func NewEngine(opts EngineOpts) *Engine { if opts.LookbackDelta == 0 { opts.LookbackDelta = defaultLookbackDelta if l := opts.Logger; l != nil { - level.Debug(l).Log("msg", "Lookback delta is zero, setting to default value", "value", defaultLookbackDelta) + l.Debug("Lookback delta is zero, setting to default value", "value", defaultLookbackDelta) } } @@ -435,6 +440,10 @@ func NewEngine(opts EngineOpts) *Engine { // Close closes ng. func (ng *Engine) Close() error { + if ng == nil { + return nil + } + if ng.activeQueryTracker != nil { return ng.activeQueryTracker.Close() } @@ -451,7 +460,7 @@ func (ng *Engine) SetQueryLogger(l QueryLogger) { // not make reload fail; only log a warning. err := ng.queryLogger.Close() if err != nil { - level.Warn(ng.logger).Log("msg", "Error while closing the previous query log file", "err", err) + ng.logger.Warn("Error while closing the previous query log file", "err", err) } } @@ -629,23 +638,23 @@ func (ng *Engine) exec(ctx context.Context, q *query) (v parser.Value, ws annota // The step provided by the user is in seconds. params["step"] = int64(eq.Interval / (time.Second / time.Nanosecond)) } - f := []interface{}{"params", params} + l.With("params", params) if err != nil { - f = append(f, "error", err) + l.With("error", err) } - f = append(f, "stats", stats.NewQueryStats(q.Stats())) + l.With("stats", stats.NewQueryStats(q.Stats())) if span := trace.SpanFromContext(ctx); span != nil { - f = append(f, "spanID", span.SpanContext().SpanID()) + l.With("spanID", span.SpanContext().SpanID()) } if origin := ctx.Value(QueryOrigin{}); origin != nil { for k, v := range origin.(map[string]interface{}) { - f = append(f, k, v) + l.With(k, v) } } - if err := l.Log(f...); err != nil { - ng.metrics.queryLogFailures.Inc() - level.Error(ng.logger).Log("msg", "can't log query", "err", err) - } + l.Info("promql query logged") + // TODO: @tjhop -- do we still need this metric/error log if logger doesn't return errors? + // ng.metrics.queryLogFailures.Inc() + // ng.logger.Error("can't log query", "err", err) } ng.queryLoggerLock.RUnlock() }() @@ -731,6 +740,7 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval samplesStats: query.sampleStats, noStepSubqueryIntervalFn: ng.noStepSubqueryIntervalFn, enableDelayedNameRemoval: ng.enableDelayedNameRemoval, + querier: querier, } query.sampleStats.InitStepTracking(start, start, 1) @@ -789,6 +799,7 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval samplesStats: query.sampleStats, noStepSubqueryIntervalFn: ng.noStepSubqueryIntervalFn, enableDelayedNameRemoval: ng.enableDelayedNameRemoval, + querier: querier, } query.sampleStats.InitStepTracking(evaluator.startTimestamp, evaluator.endTimestamp, evaluator.interval) val, warnings, err := evaluator.Eval(ctxInnerEval, s.Expr) @@ -906,11 +917,17 @@ func getTimeRangesForSelector(s *parser.EvalStmt, n *parser.VectorSelector, path } if evalRange == 0 { - start -= durationMilliseconds(s.LookbackDelta) + // Reduce the start by one fewer ms than the lookback delta + // because wo want to exclude samples that are precisely the + // lookback delta before the eval time. 
+ start -= durationMilliseconds(s.LookbackDelta) - 1 } else { - // For all matrix queries we want to ensure that we have (end-start) + range selected - // this way we have `range` data before the start time - start -= durationMilliseconds(evalRange) + // For all matrix queries we want to ensure that we have + // (end-start) + range selected this way we have `range` data + // before the start time. We subtract one from the range to + // exclude samples positioned directly at the lower boundary of + // the range. + start -= durationMilliseconds(evalRange) - 1 } offsetMilliseconds := durationMilliseconds(n.OriginalOffset) @@ -995,6 +1012,8 @@ func extractGroupsFromPath(p []parser.Node) (bool, []string) { return false, nil } +// checkAndExpandSeriesSet expands expr's UnexpandedSeriesSet into expr's Series. +// If the Series field is already non-nil, it's a no-op. func checkAndExpandSeriesSet(ctx context.Context, expr parser.Expr) (annotations.Annotations, error) { switch e := expr.(type) { case *parser.MatrixSelector: @@ -1048,11 +1067,12 @@ type evaluator struct { maxSamples int currentSamples int - logger log.Logger + logger *slog.Logger lookbackDelta time.Duration samplesStats *stats.QuerySamples noStepSubqueryIntervalFn func(rangeMillis int64) int64 enableDelayedNameRemoval bool + querier storage.Querier } // errorf causes a panic with the input formatted into an error. @@ -1078,7 +1098,7 @@ func (ev *evaluator) recover(expr parser.Expr, ws *annotations.Annotations, errp buf := make([]byte, 64<<10) buf = buf[:runtime.Stack(buf, false)] - level.Error(ev.logger).Log("msg", "runtime panic during query evaluation", "expr", expr.String(), "err", e, "stacktrace", string(buf)) + ev.logger.Error("runtime panic during query evaluation", "expr", expr.String(), "err", e, "stacktrace", string(buf)) *errp = fmt.Errorf("unexpected error: %w", err) case errWithWarnings: *errp = err.err @@ -1214,38 +1234,17 @@ func (ev *evaluator) rangeEval(ctx context.Context, prepSeries func(labels.Label ev.currentSamples = tempNumSamples // Gather input vectors for this timestamp. for i := range exprs { - vectors[i] = vectors[i][:0] - + var bh []EvalSeriesHelper + var sh []EvalSeriesHelper if prepSeries != nil { - bufHelpers[i] = bufHelpers[i][:0] - } - - for si, series := range matrixes[i] { - switch { - case len(series.Floats) > 0 && series.Floats[0].T == ts: - vectors[i] = append(vectors[i], Sample{Metric: series.Metric, F: series.Floats[0].F, T: ts, DropName: series.DropName}) - // Move input vectors forward so we don't have to re-scan the same - // past points at the next step. - matrixes[i][si].Floats = series.Floats[1:] - case len(series.Histograms) > 0 && series.Histograms[0].T == ts: - vectors[i] = append(vectors[i], Sample{Metric: series.Metric, H: series.Histograms[0].H, T: ts, DropName: series.DropName}) - matrixes[i][si].Histograms = series.Histograms[1:] - default: - continue - } - if prepSeries != nil { - bufHelpers[i] = append(bufHelpers[i], seriesHelpers[i][si]) - } - // Don't add histogram size here because we only - // copy the pointer above, not the whole - // histogram. - ev.currentSamples++ - if ev.currentSamples > ev.maxSamples { - ev.error(ErrTooManySamples(env)) - } + bh = bufHelpers[i][:0] + sh = seriesHelpers[i] } + vectors[i], bh = ev.gatherVector(ts, matrixes[i], vectors[i], bh, sh) args[i] = vectors[i] - ev.samplesStats.UpdatePeak(ev.currentSamples) + if prepSeries != nil { + bufHelpers[i] = bh + } } // Make the function call. 
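The getTimeRangesForSelector, vectorSelectorSingle and matrixIterSlice hunks around this point all shift the lower boundary of selection windows by one millisecond, so that samples sitting exactly at eval time minus the lookback delta, or exactly at the left edge of a range selector, are excluded. A minimal sketch of the resulting window semantics, with timestamps in milliseconds:

package main

import "fmt"

// inRangeWindow reports whether a sample at ts is selected by a range selector
// evaluated at evalT with the given range, under the new semantics:
// the window is (evalT-rangeMs, evalT], i.e. the left boundary is exclusive.
func inRangeWindow(ts, evalT, rangeMs int64) bool {
	return ts > evalT-rangeMs && ts <= evalT
}

// inLookbackWindow is the same idea for an instant selector and the lookback
// delta: a sample exactly lookbackMs old is no longer returned.
func inLookbackWindow(ts, evalT, lookbackMs int64) bool {
	return ts > evalT-lookbackMs && ts <= evalT
}

func main() {
	const evalT = 600_000 // 10m
	fmt.Println(inRangeWindow(300_000, evalT, 300_000))    // false: exactly range old, now excluded
	fmt.Println(inRangeWindow(300_001, evalT, 300_000))    // true
	fmt.Println(inLookbackWindow(300_000, evalT, 300_000)) // false: exactly lookback-delta old
}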
@@ -1446,6 +1445,79 @@ func (ev *evaluator) rangeEvalAgg(ctx context.Context, aggExpr *parser.Aggregate return result, warnings } +// evalSeries generates a Matrix between ev.startTimestamp and ev.endTimestamp (inclusive), each point spaced ev.interval apart, from series given offset. +// For every storage.Series iterator in series, the method iterates in ev.interval sized steps from ev.startTimestamp until and including ev.endTimestamp, +// collecting every corresponding sample (obtained via ev.vectorSelectorSingle) into a Series. +// All of the generated Series are collected into a Matrix, that gets returned. +func (ev *evaluator) evalSeries(ctx context.Context, series []storage.Series, offset time.Duration, recordOrigT bool) Matrix { + numSteps := int((ev.endTimestamp-ev.startTimestamp)/ev.interval) + 1 + + mat := make(Matrix, 0, len(series)) + var prevSS *Series + it := storage.NewMemoizedEmptyIterator(durationMilliseconds(ev.lookbackDelta)) + var chkIter chunkenc.Iterator + for _, s := range series { + if err := contextDone(ctx, "expression evaluation"); err != nil { + ev.error(err) + } + + chkIter = s.Iterator(chkIter) + it.Reset(chkIter) + ss := Series{ + Metric: s.Labels(), + } + + for ts, step := ev.startTimestamp, -1; ts <= ev.endTimestamp; ts += ev.interval { + step++ + origT, f, h, ok := ev.vectorSelectorSingle(it, offset, ts) + if !ok { + continue + } + + if h == nil { + ev.currentSamples++ + ev.samplesStats.IncrementSamplesAtStep(step, 1) + if ev.currentSamples > ev.maxSamples { + ev.error(ErrTooManySamples(env)) + } + if ss.Floats == nil { + ss.Floats = reuseOrGetFPointSlices(prevSS, numSteps) + } + if recordOrigT { + // This is an info metric, where we want to track the original sample timestamp. + // Info metric values should be 1 by convention, therefore we can re-use this + // space in the sample. + f = float64(origT) + } + ss.Floats = append(ss.Floats, FPoint{F: f, T: ts}) + } else { + if recordOrigT { + ev.error(fmt.Errorf("this should be an info metric, with float samples: %s", ss.Metric)) + } + + point := HPoint{H: h, T: ts} + histSize := point.size() + ev.currentSamples += histSize + ev.samplesStats.IncrementSamplesAtStep(step, int64(histSize)) + if ev.currentSamples > ev.maxSamples { + ev.error(ErrTooManySamples(env)) + } + if ss.Histograms == nil { + ss.Histograms = reuseOrGetHPointSlices(prevSS, numSteps) + } + ss.Histograms = append(ss.Histograms, point) + } + } + + if len(ss.Floats)+len(ss.Histograms) > 0 { + mat = append(mat, ss) + prevSS = &mat[len(mat)-1] + } + } + ev.samplesStats.UpdatePeak(ev.currentSamples) + return mat +} + // evalSubquery evaluates given SubqueryExpr and returns an equivalent // evaluated MatrixSelector in its place. Note that the Name and LabelMatchers are not set. 
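evalSeries above walks every series from ev.startTimestamp to ev.endTimestamp in ev.interval-sized steps and keeps at most one point per step. A minimal sketch of that stepping, with timestamps in milliseconds:

package main

import "fmt"

// steps lists every evaluation timestamp between start and end (end inclusive),
// spaced interval apart — the same stepping evalSeries performs per series.
func steps(start, end, interval int64) []int64 {
	numSteps := int((end-start)/interval) + 1
	out := make([]int64, 0, numSteps)
	for ts := start; ts <= end; ts += interval {
		out = append(out, ts)
	}
	return out
}

func main() {
	// A range query from t=0 to t=60s at a 15s step yields 5 evaluation points.
	fmt.Println(steps(0, 60_000, 15_000)) // [0 15000 30000 45000 60000]
}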
func (ev *evaluator) evalSubquery(ctx context.Context, subq *parser.SubqueryExpr) (*parser.MatrixSelector, int, annotations.Annotations) { @@ -1592,6 +1664,8 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, return ev.evalLabelReplace(ctx, e.Args) case "label_join": return ev.evalLabelJoin(ctx, e.Args) + case "info": + return ev.evalInfo(ctx, e.Args) } if !matrixArg { @@ -1850,20 +1924,20 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, }, e.LHS, e.RHS) default: return ev.rangeEval(ctx, initSignatures, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - vec, err := ev.VectorBinop(e.Op, v[0].(Vector), v[1].(Vector), e.VectorMatching, e.ReturnBool, sh[0], sh[1], enh) + vec, err := ev.VectorBinop(e.Op, v[0].(Vector), v[1].(Vector), e.VectorMatching, e.ReturnBool, sh[0], sh[1], enh, e.PositionRange()) return vec, handleVectorBinopError(err, e) }, e.LHS, e.RHS) } case lt == parser.ValueTypeVector && rt == parser.ValueTypeScalar: return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - vec, err := ev.VectorscalarBinop(e.Op, v[0].(Vector), Scalar{V: v[1].(Vector)[0].F}, false, e.ReturnBool, enh) + vec, err := ev.VectorscalarBinop(e.Op, v[0].(Vector), Scalar{V: v[1].(Vector)[0].F}, false, e.ReturnBool, enh, e.PositionRange()) return vec, handleVectorBinopError(err, e) }, e.LHS, e.RHS) case lt == parser.ValueTypeScalar && rt == parser.ValueTypeVector: return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - vec, err := ev.VectorscalarBinop(e.Op, v[1].(Vector), Scalar{V: v[0].(Vector)[0].F}, true, e.ReturnBool, enh) + vec, err := ev.VectorscalarBinop(e.Op, v[1].(Vector), Scalar{V: v[0].(Vector)[0].F}, true, e.ReturnBool, enh, e.PositionRange()) return vec, handleVectorBinopError(err, e) }, e.LHS, e.RHS) } @@ -1883,56 +1957,7 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, if err != nil { ev.error(errWithWarnings{fmt.Errorf("expanding series: %w", err), ws}) } - mat := make(Matrix, 0, len(e.Series)) - var prevSS *Series - it := storage.NewMemoizedEmptyIterator(durationMilliseconds(ev.lookbackDelta)) - var chkIter chunkenc.Iterator - for i, s := range e.Series { - if err := contextDone(ctx, "expression evaluation"); err != nil { - ev.error(err) - } - chkIter = s.Iterator(chkIter) - it.Reset(chkIter) - ss := Series{ - Metric: e.Series[i].Labels(), - } - - for ts, step := ev.startTimestamp, -1; ts <= ev.endTimestamp; ts += ev.interval { - step++ - _, f, h, ok := ev.vectorSelectorSingle(it, e, ts) - if ok { - if h == nil { - ev.currentSamples++ - ev.samplesStats.IncrementSamplesAtStep(step, 1) - if ev.currentSamples > ev.maxSamples { - ev.error(ErrTooManySamples(env)) - } - if ss.Floats == nil { - ss.Floats = reuseOrGetFPointSlices(prevSS, numSteps) - } - ss.Floats = append(ss.Floats, FPoint{F: f, T: ts}) - } else { - point := HPoint{H: h, T: ts} - histSize := point.size() - ev.currentSamples += histSize - ev.samplesStats.IncrementSamplesAtStep(step, int64(histSize)) - if ev.currentSamples > ev.maxSamples { - ev.error(ErrTooManySamples(env)) - } - if ss.Histograms == nil { - ss.Histograms = reuseOrGetHPointSlices(prevSS, numSteps) - } - ss.Histograms = append(ss.Histograms, point) - } - } - } - - if len(ss.Floats)+len(ss.Histograms) > 0 { - mat = append(mat, ss) - prevSS = &mat[len(mat)-1] - } - 
} - ev.samplesStats.UpdatePeak(ev.currentSamples) + mat := ev.evalSeries(ctx, e.Series, e.Offset, false) return mat, ws case *parser.MatrixSelector: @@ -1953,6 +1978,7 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, samplesStats: ev.samplesStats.NewChild(), noStepSubqueryIntervalFn: ev.noStepSubqueryIntervalFn, enableDelayedNameRemoval: ev.enableDelayedNameRemoval, + querier: ev.querier, } if e.Step != 0 { @@ -1997,6 +2023,7 @@ func (ev *evaluator) eval(ctx context.Context, expr parser.Expr) (parser.Value, samplesStats: ev.samplesStats.NewChild(), noStepSubqueryIntervalFn: ev.noStepSubqueryIntervalFn, enableDelayedNameRemoval: ev.enableDelayedNameRemoval, + querier: ev.querier, } res, ws := newEv.eval(ctx, e.Expr) ev.currentSamples = newEv.currentSamples @@ -2083,7 +2110,7 @@ func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(ctx context.Co seriesIterators := make([]*storage.MemoizedSeriesIterator, len(vs.Series)) for i, s := range vs.Series { it := s.Iterator(nil) - seriesIterators[i] = storage.NewMemoizedIterator(it, durationMilliseconds(ev.lookbackDelta)) + seriesIterators[i] = storage.NewMemoizedIterator(it, durationMilliseconds(ev.lookbackDelta)-1) } return ev.rangeEval(ctx, nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, annotations.Annotations) { @@ -2097,7 +2124,7 @@ func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(ctx context.Co vec := make(Vector, 0, len(vs.Series)) for i, s := range vs.Series { it := seriesIterators[i] - t, _, _, ok := ev.vectorSelectorSingle(it, vs, enh.Ts) + t, _, _, ok := ev.vectorSelectorSingle(it, vs.Offset, enh.Ts) if !ok { continue } @@ -2121,10 +2148,10 @@ func (ev *evaluator) rangeEvalTimestampFunctionOverVectorSelector(ctx context.Co } // vectorSelectorSingle evaluates an instant vector for the iterator of one time series. -func (ev *evaluator) vectorSelectorSingle(it *storage.MemoizedSeriesIterator, node *parser.VectorSelector, ts int64) ( +func (ev *evaluator) vectorSelectorSingle(it *storage.MemoizedSeriesIterator, offset time.Duration, ts int64) ( int64, float64, *histogram.FloatHistogram, bool, ) { - refTime := ts - durationMilliseconds(node.Offset) + refTime := ts - durationMilliseconds(offset) var t int64 var v float64 var h *histogram.FloatHistogram @@ -2145,7 +2172,7 @@ func (ev *evaluator) vectorSelectorSingle(it *storage.MemoizedSeriesIterator, no if valueType == chunkenc.ValNone || t > refTime { var ok bool t, v, h, ok = it.PeekPrev() - if !ok || t < refTime-durationMilliseconds(ev.lookbackDelta) { + if !ok || t <= refTime-durationMilliseconds(ev.lookbackDelta) { return 0, 0, nil, false } } @@ -2279,20 +2306,20 @@ func (ev *evaluator) matrixIterSlice( mintFloats, mintHistograms := mint, mint // First floats... - if len(floats) > 0 && floats[len(floats)-1].T >= mint { + if len(floats) > 0 && floats[len(floats)-1].T > mint { // There is an overlap between previous and current ranges, retain common // points. In most such cases: // (a) the overlap is significantly larger than the eval step; and/or // (b) the number of samples is relatively small. // so a linear search will be as fast as a binary search. var drop int - for drop = 0; floats[drop].T < mint; drop++ { + for drop = 0; floats[drop].T <= mint; drop++ { } ev.currentSamples -= drop copy(floats, floats[drop:]) floats = floats[:len(floats)-drop] // Only append points with timestamps after the last timestamp we have. 
- mintFloats = floats[len(floats)-1].T + 1 + mintFloats = floats[len(floats)-1].T } else { ev.currentSamples -= len(floats) if floats != nil { @@ -2301,14 +2328,14 @@ func (ev *evaluator) matrixIterSlice( } // ...then the same for histograms. TODO(beorn7): Use generics? - if len(histograms) > 0 && histograms[len(histograms)-1].T >= mint { + if len(histograms) > 0 && histograms[len(histograms)-1].T > mint { // There is an overlap between previous and current ranges, retain common // points. In most such cases: // (a) the overlap is significantly larger than the eval step; and/or // (b) the number of samples is relatively small. // so a linear search will be as fast as a binary search. var drop int - for drop = 0; histograms[drop].T < mint; drop++ { + for drop = 0; histograms[drop].T <= mint; drop++ { } // Rotate the buffer around the drop index so that points before mint can be // reused to store new histograms. @@ -2319,7 +2346,7 @@ func (ev *evaluator) matrixIterSlice( histograms = histograms[:len(histograms)-drop] ev.currentSamples -= totalHPointSize(histograms) // Only append points with timestamps after the last timestamp we have. - mintHistograms = histograms[len(histograms)-1].T + 1 + mintHistograms = histograms[len(histograms)-1].T } else { ev.currentSamples -= totalHPointSize(histograms) if histograms != nil { @@ -2343,7 +2370,7 @@ loop: case chunkenc.ValFloatHistogram, chunkenc.ValHistogram: t := buf.AtT() // Values in the buffer are guaranteed to be smaller than maxt. - if t >= mintHistograms { + if t > mintHistograms { if histograms == nil { histograms = getMatrixSelectorHPoints() } @@ -2369,7 +2396,7 @@ loop: continue loop } // Values in the buffer are guaranteed to be smaller than maxt. - if t >= mintFloats { + if t > mintFloats { ev.currentSamples++ if ev.currentSamples > ev.maxSamples { ev.error(ErrTooManySamples(env)) @@ -2504,7 +2531,7 @@ func (ev *evaluator) VectorUnless(lhs, rhs Vector, matching *parser.VectorMatchi } // VectorBinop evaluates a binary operation between two Vectors, excluding set operators. -func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *parser.VectorMatching, returnBool bool, lhsh, rhsh []EvalSeriesHelper, enh *EvalNodeHelper) (Vector, error) { +func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching *parser.VectorMatching, returnBool bool, lhsh, rhsh []EvalSeriesHelper, enh *EvalNodeHelper, pos posrange.PositionRange) (Vector, error) { if matching.Card == parser.CardManyToMany { panic("many-to-many only allowed for set operators") } @@ -2578,7 +2605,7 @@ func (ev *evaluator) VectorBinop(op parser.ItemType, lhs, rhs Vector, matching * fl, fr = fr, fl hl, hr = hr, hl } - floatValue, histogramValue, keep, err := vectorElemBinop(op, fl, fr, hl, hr) + floatValue, histogramValue, keep, err := vectorElemBinop(op, fl, fr, hl, hr, pos) if err != nil { lastErr = err } @@ -2687,7 +2714,7 @@ func resultMetric(lhs, rhs labels.Labels, op parser.ItemType, matching *parser.V } // VectorscalarBinop evaluates a binary operation between a Vector and a Scalar. 
-func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scalar, swap, returnBool bool, enh *EvalNodeHelper) (Vector, error) { +func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scalar, swap, returnBool bool, enh *EvalNodeHelper, pos posrange.PositionRange) (Vector, error) { var lastErr error for _, lhsSample := range lhs { lf, rf := lhsSample.F, rhs.V @@ -2699,7 +2726,7 @@ func (ev *evaluator) VectorscalarBinop(op parser.ItemType, lhs Vector, rhs Scala lf, rf = rf, lf lh, rh = rh, lh } - float, histogram, keep, err := vectorElemBinop(op, lf, rf, lh, rh) + float, histogram, keep, err := vectorElemBinop(op, lf, rf, lh, rh, pos) if err != nil { lastErr = err } @@ -2766,7 +2793,7 @@ func scalarBinop(op parser.ItemType, lhs, rhs float64) float64 { } // vectorElemBinop evaluates a binary operation between two Vector elements. -func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram.FloatHistogram) (float64, *histogram.FloatHistogram, bool, error) { +func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram.FloatHistogram, pos posrange.PositionRange) (float64, *histogram.FloatHistogram, bool, error) { switch op { case parser.ADD: if hlhs != nil && hrhs != nil { @@ -2776,7 +2803,13 @@ func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram } return 0, res.Compact(0), true, nil } - return lhs + rhs, nil, true, nil + if hlhs == nil && hrhs == nil { + return lhs + rhs, nil, true, nil + } + if hlhs != nil { + return 0, nil, false, annotations.NewIncompatibleTypesInBinOpInfo("histogram", "+", "float", pos) + } + return 0, nil, false, annotations.NewIncompatibleTypesInBinOpInfo("float", "+", "histogram", pos) case parser.SUB: if hlhs != nil && hrhs != nil { res, err := hlhs.Copy().Sub(hrhs) @@ -2785,7 +2818,13 @@ func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram } return 0, res.Compact(0), true, nil } - return lhs - rhs, nil, true, nil + if hlhs == nil && hrhs == nil { + return lhs - rhs, nil, true, nil + } + if hlhs != nil { + return 0, nil, false, annotations.NewIncompatibleTypesInBinOpInfo("histogram", "-", "float", pos) + } + return 0, nil, false, annotations.NewIncompatibleTypesInBinOpInfo("float", "-", "histogram", pos) case parser.MUL: if hlhs != nil && hrhs == nil { return 0, hlhs.Copy().Mul(rhs), true, nil @@ -2793,11 +2832,20 @@ func vectorElemBinop(op parser.ItemType, lhs, rhs float64, hlhs, hrhs *histogram if hlhs == nil && hrhs != nil { return 0, hrhs.Copy().Mul(lhs), true, nil } + if hlhs != nil && hrhs != nil { + return 0, nil, false, annotations.NewIncompatibleTypesInBinOpInfo("histogram", "*", "histogram", pos) + } return lhs * rhs, nil, true, nil case parser.DIV: if hlhs != nil && hrhs == nil { return 0, hlhs.Copy().Div(rhs), true, nil } + if hrhs != nil { + if hlhs != nil { + return 0, nil, false, annotations.NewIncompatibleTypesInBinOpInfo("histogram", "/", "histogram", pos) + } + return 0, nil, false, annotations.NewIncompatibleTypesInBinOpInfo("float", "/", "histogram", pos) + } return lhs / rhs, nil, true, nil case parser.POW: return math.Pow(lhs, rhs), nil, true, nil @@ -2874,7 +2922,15 @@ func (ev *evaluator) aggregation(e *parser.AggregateExpr, q float64, inputMatrix group.hasHistogram = true } case parser.STDVAR, parser.STDDEV: - group.floatValue = 0 + switch { + case h != nil: + // Ignore histograms for STDVAR and STDDEV. 
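The vectorElemBinop hunks above stop silently treating a float/histogram mix as a float operation for + and -: the mismatched sample is now dropped and an incompatible-types annotation is attached at the expression's position. A reduced sketch of the decision for '+', where hist is a hypothetical stand-in for *histogram.FloatHistogram and only presence or absence matters:

package main

import "fmt"

// hist stands in for *histogram.FloatHistogram.
type hist struct{}

// addCompat mirrors the new behaviour for '+': both operands must be floats or
// both must be histograms, otherwise the sample is dropped and (in the real
// code) an "incompatible types" annotation is emitted.
func addCompat(lhsH, rhsH *hist) (keep bool, note string) {
	switch {
	case lhsH != nil && rhsH != nil:
		return true, "histogram + histogram"
	case lhsH == nil && rhsH == nil:
		return true, "float + float"
	case lhsH != nil:
		return false, "histogram + float: incompatible, sample dropped"
	default:
		return false, "float + histogram: incompatible, sample dropped"
	}
}

func main() {
	fmt.Println(addCompat(nil, nil))     // true "float + float"
	fmt.Println(addCompat(&hist{}, nil)) // false "histogram + float: ..."
}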
+ group.seen = false + case math.IsNaN(f), math.IsInf(f, 0): + group.floatValue = math.NaN() + default: + group.floatValue = 0 + } case parser.QUANTILE: group.heap = make(vectorByValueHeap, 1) group.heap[0] = Sample{F: f} @@ -3335,6 +3391,9 @@ func handleVectorBinopError(err error, e *parser.BinaryExpr) annotations.Annotat } metricName := "" pos := e.PositionRange() + if errors.Is(err, annotations.PromQLInfo) || errors.Is(err, annotations.PromQLWarning) { + return annotations.New().Add(err) + } if errors.Is(err, histogram.ErrHistogramsIncompatibleSchema) { return annotations.New().Add(annotations.NewMixedExponentialCustomHistogramsWarning(metricName, pos)) } else if errors.Is(err, histogram.ErrHistogramsIncompatibleBounds) { @@ -3661,3 +3720,41 @@ func newHistogramStatsSeries(series storage.Series) *histogramStatsSeries { func (s histogramStatsSeries) Iterator(it chunkenc.Iterator) chunkenc.Iterator { return NewHistogramStatsIterator(s.Series.Iterator(it)) } + +// gatherVector gathers a Vector for ts from the series in input. +// output is used as a buffer. +// If bufHelpers and seriesHelpers are provided, seriesHelpers[i] is appended to bufHelpers for every input index i. +// The gathered Vector and bufHelper are returned. +func (ev *evaluator) gatherVector(ts int64, input Matrix, output Vector, bufHelpers, seriesHelpers []EvalSeriesHelper) (Vector, []EvalSeriesHelper) { + output = output[:0] + for i, series := range input { + switch { + case len(series.Floats) > 0 && series.Floats[0].T == ts: + s := series.Floats[0] + output = append(output, Sample{Metric: series.Metric, F: s.F, T: ts, DropName: series.DropName}) + // Move input vectors forward so we don't have to re-scan the same + // past points at the next step. + input[i].Floats = series.Floats[1:] + case len(series.Histograms) > 0 && series.Histograms[0].T == ts: + s := series.Histograms[0] + output = append(output, Sample{Metric: series.Metric, H: s.H, T: ts, DropName: series.DropName}) + input[i].Histograms = series.Histograms[1:] + default: + continue + } + if len(seriesHelpers) > 0 { + bufHelpers = append(bufHelpers, seriesHelpers[i]) + } + + // Don't add histogram size here because we only + // copy the pointer above, not the whole + // histogram. + ev.currentSamples++ + if ev.currentSamples > ev.maxSamples { + ev.error(ErrTooManySamples(env)) + } + } + ev.samplesStats.UpdatePeak(ev.currentSamples) + + return output, bufHelpers +} diff --git a/vendor/github.com/prometheus/prometheus/promql/functions.go b/vendor/github.com/prometheus/prometheus/promql/functions.go index 182b69b0800..fab30166d78 100644 --- a/vendor/github.com/prometheus/prometheus/promql/functions.go +++ b/vendor/github.com/prometheus/prometheus/promql/functions.go @@ -350,7 +350,7 @@ func calcTrendValue(i int, tf, s0, s1, b float64) float64 { // data. A lower smoothing factor increases the influence of historical data. The trend factor (0 < tf < 1) affects // how trends in historical data will affect the current data. A higher trend factor increases the influence. // of trends. Algorithm taken from https://en.wikipedia.org/wiki/Exponential_smoothing titled: "Double exponential smoothing". -func funcHoltWinters(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { +func funcDoubleExponentialSmoothing(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { samples := vals[0].(Matrix)[0] // The smoothing factor argument. 
@@ -415,22 +415,12 @@ func funcSortDesc(vals []parser.Value, args parser.Expressions, enh *EvalNodeHel // === sort_by_label(vector parser.ValueTypeVector, label parser.ValueTypeString...) (Vector, Annotations) === func funcSortByLabel(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - // First, sort by the full label set. This ensures a consistent ordering in case sorting by the - // labels provided as arguments is not conclusive. + lbls := stringSliceFromArgs(args[1:]) slices.SortFunc(vals[0].(Vector), func(a, b Sample) int { - return labels.Compare(a.Metric, b.Metric) - }) - - labels := stringSliceFromArgs(args[1:]) - // Next, sort by the labels provided as arguments. - slices.SortFunc(vals[0].(Vector), func(a, b Sample) int { - // Iterate over each given label. - for _, label := range labels { + for _, label := range lbls { lv1 := a.Metric.Get(label) lv2 := b.Metric.Get(label) - // If we encounter multiple samples with the same label values, the sorting which was - // performed in the first step will act as a "tie breaker". if lv1 == lv2 { continue } @@ -442,7 +432,8 @@ func funcSortByLabel(vals []parser.Value, args parser.Expressions, enh *EvalNode return +1 } - return 0 + // If all labels provided as arguments were equal, sort by the full label set. This ensures a consistent ordering. + return labels.Compare(a.Metric, b.Metric) }) return vals[0].(Vector), nil @@ -450,22 +441,12 @@ func funcSortByLabel(vals []parser.Value, args parser.Expressions, enh *EvalNode // === sort_by_label_desc(vector parser.ValueTypeVector, label parser.ValueTypeString...) (Vector, Annotations) === func funcSortByLabelDesc(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - // First, sort by the full label set. This ensures a consistent ordering in case sorting by the - // labels provided as arguments is not conclusive. - slices.SortFunc(vals[0].(Vector), func(a, b Sample) int { - return labels.Compare(b.Metric, a.Metric) - }) - - labels := stringSliceFromArgs(args[1:]) - // Next, sort by the labels provided as arguments. + lbls := stringSliceFromArgs(args[1:]) slices.SortFunc(vals[0].(Vector), func(a, b Sample) int { - // Iterate over each given label. - for _, label := range labels { + for _, label := range lbls { lv1 := a.Metric.Get(label) lv2 := b.Metric.Get(label) - // If we encounter multiple samples with the same label values, the sorting which was - // performed in the first step will act as a "tie breaker". if lv1 == lv2 { continue } @@ -477,7 +458,8 @@ func funcSortByLabelDesc(vals []parser.Value, args parser.Expressions, enh *Eval return -1 } - return 0 + // If all labels provided as arguments were equal, sort by the full label set. This ensures a consistent ordering. + return -labels.Compare(a.Metric, b.Metric) }) return vals[0].(Vector), nil @@ -551,6 +533,10 @@ func funcRound(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper toNearestInverse := 1.0 / toNearest for _, el := range vec { + if el.H != nil { + // Process only float samples. 
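funcSortByLabel above collapses the previous two-pass sort into a single comparator that walks the requested labels in order and only falls back to comparing the full label set when all of them tie, keeping the ordering deterministic without a second sort. A sketch of the same comparator pattern with a simplified sample type:

package main

import (
	"fmt"
	"slices"
	"strings"
)

// sample is a hypothetical stand-in for promql.Sample: a metric as a label map
// plus its rendered form, used here as the final tie-breaker.
type sample struct {
	labels map[string]string
	full   string // canonical rendering of the whole label set
}

// sortByLabel sorts in one pass: compare the requested labels in order, and
// only if every one of them ties fall back to the full label set.
func sortByLabel(v []sample, lbls ...string) {
	slices.SortFunc(v, func(a, b sample) int {
		for _, l := range lbls {
			if c := strings.Compare(a.labels[l], b.labels[l]); c != 0 {
				return c
			}
		}
		return strings.Compare(a.full, b.full)
	})
}

func main() {
	v := []sample{
		{labels: map[string]string{"env": "prod", "zone": "b"}, full: `{env="prod", zone="b"}`},
		{labels: map[string]string{"env": "dev", "zone": "a"}, full: `{env="dev", zone="a"}`},
		{labels: map[string]string{"env": "prod", "zone": "a"}, full: `{env="prod", zone="a"}`},
	}
	sortByLabel(v, "env")
	for _, s := range v {
		fmt.Println(s.full) // dev first, then the two prod samples ordered by full label set
	}
}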
+ continue + } f := math.Floor(el.F*toNearestInverse+0.5) / toNearestInverse enh.Out = append(enh.Out, Sample{ Metric: el.Metric, @@ -1480,7 +1466,7 @@ func (ev *evaluator) evalLabelReplace(ctx context.Context, args parser.Expressio regexStr = stringFromArg(args[4]) ) - regex, err := regexp.Compile("^(?:" + regexStr + ")$") + regex, err := regexp.Compile("^(?s:" + regexStr + ")$") if err != nil { panic(fmt.Errorf("invalid regular expression in label_replace(): %s", regexStr)) } @@ -1514,11 +1500,6 @@ func (ev *evaluator) evalLabelReplace(ctx context.Context, args parser.Expressio return matrix, ws } -// === label_replace(Vector parser.ValueTypeVector, dst_label, replacement, src_labelname, regex parser.ValueTypeString) (Vector, Annotations) === -func funcLabelReplace(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - panic("funcLabelReplace wrong implementation called") -} - // === Vector(s Scalar) (Vector, Annotations) === func funcVector(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { return append(enh.Out, @@ -1570,11 +1551,6 @@ func (ev *evaluator) evalLabelJoin(ctx context.Context, args parser.Expressions) return matrix, ws } -// === label_join(vector model.ValVector, dest_labelname, separator, src_labelname...) (Vector, Annotations) === -func funcLabelJoin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) (Vector, annotations.Annotations) { - panic("funcLabelReplace wrong implementation called") -} - // Common code for date related functions. func dateWrapper(vals []parser.Value, enh *EvalNodeHelper, f func(time.Time) float64) Vector { if len(vals) == 0 { @@ -1657,47 +1633,50 @@ func funcYear(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) // FunctionCalls is a list of all functions supported by PromQL, including their types. 
var FunctionCalls = map[string]FunctionCall{ - "abs": funcAbs, - "absent": funcAbsent, - "absent_over_time": funcAbsentOverTime, - "acos": funcAcos, - "acosh": funcAcosh, - "asin": funcAsin, - "asinh": funcAsinh, - "atan": funcAtan, - "atanh": funcAtanh, - "avg_over_time": funcAvgOverTime, - "ceil": funcCeil, - "changes": funcChanges, - "clamp": funcClamp, - "clamp_max": funcClampMax, - "clamp_min": funcClampMin, - "cos": funcCos, - "cosh": funcCosh, - "count_over_time": funcCountOverTime, - "days_in_month": funcDaysInMonth, - "day_of_month": funcDayOfMonth, - "day_of_week": funcDayOfWeek, - "day_of_year": funcDayOfYear, - "deg": funcDeg, - "delta": funcDelta, - "deriv": funcDeriv, - "exp": funcExp, - "floor": funcFloor, - "histogram_avg": funcHistogramAvg, - "histogram_count": funcHistogramCount, - "histogram_fraction": funcHistogramFraction, - "histogram_quantile": funcHistogramQuantile, - "histogram_sum": funcHistogramSum, - "histogram_stddev": funcHistogramStdDev, - "histogram_stdvar": funcHistogramStdVar, - "holt_winters": funcHoltWinters, + "abs": funcAbs, + "absent": funcAbsent, + "absent_over_time": funcAbsentOverTime, + "acos": funcAcos, + "acosh": funcAcosh, + "asin": funcAsin, + "asinh": funcAsinh, + "atan": funcAtan, + "atanh": funcAtanh, + "avg_over_time": funcAvgOverTime, + "ceil": funcCeil, + "changes": funcChanges, + "clamp": funcClamp, + "clamp_max": funcClampMax, + "clamp_min": funcClampMin, + "cos": funcCos, + "cosh": funcCosh, + "count_over_time": funcCountOverTime, + "days_in_month": funcDaysInMonth, + "day_of_month": funcDayOfMonth, + "day_of_week": funcDayOfWeek, + "day_of_year": funcDayOfYear, + "deg": funcDeg, + "delta": funcDelta, + "deriv": funcDeriv, + "exp": funcExp, + "floor": funcFloor, + "histogram_avg": funcHistogramAvg, + "histogram_count": funcHistogramCount, + "histogram_fraction": funcHistogramFraction, + "histogram_quantile": funcHistogramQuantile, + "histogram_sum": funcHistogramSum, + "histogram_stddev": funcHistogramStdDev, + "histogram_stdvar": funcHistogramStdVar, + "double_exponential_smoothing": funcDoubleExponentialSmoothing, + // Keep an alias for Mimir users using holt_winters. + "holt_winters": funcDoubleExponentialSmoothing, "hour": funcHour, "idelta": funcIdelta, "increase": funcIncrease, + "info": nil, "irate": funcIrate, - "label_replace": funcLabelReplace, - "label_join": funcLabelJoin, + "label_replace": nil, // evalLabelReplace not called via this map. + "label_join": nil, // evalLabelJoin not called via this map. "ln": funcLn, "log10": funcLog10, "log2": funcLog2, diff --git a/vendor/github.com/prometheus/prometheus/promql/fuzz.go b/vendor/github.com/prometheus/prometheus/promql/fuzz.go index 5f08e6a72c9..759055fb0d9 100644 --- a/vendor/github.com/prometheus/prometheus/promql/fuzz.go +++ b/vendor/github.com/prometheus/prometheus/promql/fuzz.go @@ -61,17 +61,13 @@ const ( var symbolTable = labels.NewSymbolTable() func fuzzParseMetricWithContentType(in []byte, contentType string) int { - p, warning := textparse.New(in, contentType, false, symbolTable) - if warning != nil { + p, warning := textparse.New(in, contentType, "", false, false, symbolTable) + if p == nil || warning != nil { // An invalid content type is being passed, which should not happen // in this context. 
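The FunctionCalls hunk above renames holt_winters to double_exponential_smoothing while keeping the old name as an alias, and maps info, label_replace and label_join to nil because the evaluator dispatches them directly rather than through this table. A reduced sketch of that registration pattern, with hypothetical names:

package main

import "fmt"

// call is a stand-in for promql's FunctionCall signature.
type call func() string

func desImpl() string { return "double exponential smoothing" }

// Both names resolve to the same implementation, so existing expressions using
// the old name keep working; entries the evaluator handles itself map to nil.
var functionCalls = map[string]call{
	"double_exponential_smoothing": desImpl,
	"holt_winters":                 desImpl, // alias kept for existing queries
	"info":                         nil,     // evaluated directly by the engine
}

func main() {
	if fn := functionCalls["holt_winters"]; fn != nil {
		fmt.Println(fn()) // same code path as double_exponential_smoothing
	}
}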
panic(warning) } - if contentType == "application/openmetrics-text" { - p = textparse.NewOpenMetricsParser(in, symbolTable) - } - var err error for { _, err = p.Next() @@ -95,7 +91,7 @@ func fuzzParseMetricWithContentType(in []byte, contentType string) int { // Note that this is not the parser for the text-based exposition-format; that // lives in github.com/prometheus/client_golang/text. func FuzzParseMetric(in []byte) int { - return fuzzParseMetricWithContentType(in, "") + return fuzzParseMetricWithContentType(in, "text/plain") } func FuzzParseOpenMetric(in []byte) int { diff --git a/vendor/github.com/prometheus/prometheus/promql/info.go b/vendor/github.com/prometheus/prometheus/promql/info.go new file mode 100644 index 00000000000..1a9f7eb18e4 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/promql/info.go @@ -0,0 +1,454 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package promql + +import ( + "context" + "errors" + "fmt" + "slices" + "strings" + + "github.com/grafana/regexp" + + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/promql/parser" + "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/util/annotations" +) + +const targetInfo = "target_info" + +// identifyingLabels are the labels we consider as identifying for info metrics. +// Currently hard coded, so we don't need knowledge of individual info metrics. +var identifyingLabels = []string{"instance", "job"} + +// evalInfo implements the info PromQL function. +func (ev *evaluator) evalInfo(ctx context.Context, args parser.Expressions) (parser.Value, annotations.Annotations) { + val, annots := ev.eval(ctx, args[0]) + mat := val.(Matrix) + // Map from data label name to matchers. + dataLabelMatchers := map[string][]*labels.Matcher{} + var infoNameMatchers []*labels.Matcher + if len(args) > 1 { + // TODO: Introduce a dedicated LabelSelector type. + labelSelector := args[1].(*parser.VectorSelector) + for _, m := range labelSelector.LabelMatchers { + dataLabelMatchers[m.Name] = append(dataLabelMatchers[m.Name], m) + if m.Name == labels.MetricName { + infoNameMatchers = append(infoNameMatchers, m) + } + } + } else { + infoNameMatchers = []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, targetInfo)} + } + + // Don't try to enrich info series. 
+ ignoreSeries := map[int]struct{}{} +loop: + for i, s := range mat { + name := s.Metric.Get(labels.MetricName) + for _, m := range infoNameMatchers { + if m.Matches(name) { + ignoreSeries[i] = struct{}{} + continue loop + } + } + } + + selectHints := ev.infoSelectHints(args[0]) + infoSeries, ws, err := ev.fetchInfoSeries(ctx, mat, ignoreSeries, dataLabelMatchers, selectHints) + if err != nil { + ev.error(err) + } + annots.Merge(ws) + + res, ws := ev.combineWithInfoSeries(ctx, mat, infoSeries, ignoreSeries, dataLabelMatchers) + annots.Merge(ws) + return res, annots +} + +// infoSelectHints calculates the storage.SelectHints for selecting info series, given expr (first argument to info call). +func (ev *evaluator) infoSelectHints(expr parser.Expr) storage.SelectHints { + var nodeTimestamp *int64 + var offset int64 + parser.Inspect(expr, func(node parser.Node, path []parser.Node) error { + switch n := node.(type) { + case *parser.VectorSelector: + if n.Timestamp != nil { + nodeTimestamp = n.Timestamp + } + offset = durationMilliseconds(n.OriginalOffset) + return fmt.Errorf("end traversal") + default: + return nil + } + }) + + start := ev.startTimestamp + end := ev.endTimestamp + if nodeTimestamp != nil { + // The timestamp on the selector overrides everything. + start = *nodeTimestamp + end = *nodeTimestamp + } + // Reduce the start by one fewer ms than the lookback delta + // because wo want to exclude samples that are precisely the + // lookback delta before the eval time. + start -= durationMilliseconds(ev.lookbackDelta) - 1 + start -= offset + end -= offset + + return storage.SelectHints{ + Start: start, + End: end, + Step: ev.interval, + Func: "info", + } +} + +// fetchInfoSeries fetches info series given matching identifying labels in mat. +// Series in ignoreSeries are not fetched. +// dataLabelMatchers may be mutated. +func (ev *evaluator) fetchInfoSeries(ctx context.Context, mat Matrix, ignoreSeries map[int]struct{}, dataLabelMatchers map[string][]*labels.Matcher, selectHints storage.SelectHints) (Matrix, annotations.Annotations, error) { + // A map of values for all identifying labels we are interested in. + idLblValues := map[string]map[string]struct{}{} + for i, s := range mat { + if _, exists := ignoreSeries[i]; exists { + continue + } + + // Register relevant values per identifying label for this series. + for _, l := range identifyingLabels { + val := s.Metric.Get(l) + if val == "" { + continue + } + + if idLblValues[l] == nil { + idLblValues[l] = map[string]struct{}{} + } + idLblValues[l][val] = struct{}{} + } + } + if len(idLblValues) == 0 { + return nil, nil, nil + } + + // Generate regexps for every interesting value per identifying label. 
+ var sb strings.Builder + idLblRegexps := make(map[string]string, len(idLblValues)) + for name, vals := range idLblValues { + sb.Reset() + i := 0 + for v := range vals { + if i > 0 { + sb.WriteRune('|') + } + sb.WriteString(regexp.QuoteMeta(v)) + i++ + } + idLblRegexps[name] = sb.String() + } + + var infoLabelMatchers []*labels.Matcher + for name, re := range idLblRegexps { + infoLabelMatchers = append(infoLabelMatchers, labels.MustNewMatcher(labels.MatchRegexp, name, re)) + } + var nameMatcher *labels.Matcher + for name, ms := range dataLabelMatchers { + for i, m := range ms { + if m.Name == labels.MetricName { + nameMatcher = m + ms = slices.Delete(ms, i, i+1) + } + infoLabelMatchers = append(infoLabelMatchers, m) + } + if len(ms) > 0 { + dataLabelMatchers[name] = ms + } else { + delete(dataLabelMatchers, name) + } + } + if nameMatcher == nil { + // Default to using the target_info metric. + infoLabelMatchers = append([]*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, targetInfo)}, infoLabelMatchers...) + } + + infoIt := ev.querier.Select(ctx, false, &selectHints, infoLabelMatchers...) + infoSeries, ws, err := expandSeriesSet(ctx, infoIt) + if err != nil { + return nil, ws, err + } + + infoMat := ev.evalSeries(ctx, infoSeries, 0, true) + return infoMat, ws, nil +} + +// combineWithInfoSeries combines mat with select data labels from infoMat. +func (ev *evaluator) combineWithInfoSeries(ctx context.Context, mat, infoMat Matrix, ignoreSeries map[int]struct{}, dataLabelMatchers map[string][]*labels.Matcher) (Matrix, annotations.Annotations) { + buf := make([]byte, 0, 1024) + lb := labels.NewScratchBuilder(0) + sigFunction := func(name string) func(labels.Labels) string { + return func(lset labels.Labels) string { + lb.Reset() + lb.Add(labels.MetricName, name) + lset.MatchLabels(true, identifyingLabels...).Range(func(l labels.Label) { + lb.Add(l.Name, l.Value) + }) + lb.Sort() + return string(lb.Labels().Bytes(buf)) + } + } + + infoMetrics := map[string]struct{}{} + for _, is := range infoMat { + lblMap := is.Metric.Map() + infoMetrics[lblMap[labels.MetricName]] = struct{}{} + } + sigfs := make(map[string]func(labels.Labels) string, len(infoMetrics)) + for name := range infoMetrics { + sigfs[name] = sigFunction(name) + } + + // Keep a copy of the original point slices so they can be returned to the pool. + origMatrices := []Matrix{ + make(Matrix, len(mat)), + make(Matrix, len(infoMat)), + } + copy(origMatrices[0], mat) + copy(origMatrices[1], infoMat) + + numSteps := int((ev.endTimestamp-ev.startTimestamp)/ev.interval) + 1 + originalNumSamples := ev.currentSamples + + // Create an output vector that is as big as the input matrix with + // the most time series. + biggestLen := max(len(mat), len(infoMat)) + baseVector := make(Vector, 0, len(mat)) + infoVector := make(Vector, 0, len(infoMat)) + enh := &EvalNodeHelper{ + Out: make(Vector, 0, biggestLen), + } + type seriesAndTimestamp struct { + Series + ts int64 + } + seriess := make(map[uint64]seriesAndTimestamp, biggestLen) // Output series by series hash. + tempNumSamples := ev.currentSamples + + // For every base series, compute signature per info metric. 
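fetchInfoSeries above turns the identifying-label values observed on the base series into one MatchRegexp matcher per label by quoting each value and joining with '|'. A standalone sketch of that regexp construction using the standard library regexp package (the vendored code uses the grafana/regexp fork, which exposes the same API):

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// valuesRegexp joins the observed values for one identifying label into a
// single alternation, quoting each value so metacharacters match literally.
func valuesRegexp(vals []string) string {
	var sb strings.Builder
	for i, v := range vals {
		if i > 0 {
			sb.WriteRune('|')
		}
		sb.WriteString(regexp.QuoteMeta(v))
	}
	return sb.String()
}

func main() {
	re := valuesRegexp([]string{"10.0.0.1:9090", "10.0.0.2:9090"})
	fmt.Println(re) // 10\.0\.0\.1:9090|10\.0\.0\.2:9090
	fmt.Println(regexp.MustCompile("^(?:" + re + ")$").MatchString("10.0.0.1:9090")) // true
}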
+ baseSigs := make([]map[string]string, 0, len(mat)) + for _, s := range mat { + sigs := make(map[string]string, len(infoMetrics)) + for infoName := range infoMetrics { + sigs[infoName] = sigfs[infoName](s.Metric) + } + baseSigs = append(baseSigs, sigs) + } + + infoSigs := make([]string, 0, len(infoMat)) + for _, s := range infoMat { + name := s.Metric.Map()[labels.MetricName] + infoSigs = append(infoSigs, sigfs[name](s.Metric)) + } + + var warnings annotations.Annotations + for ts := ev.startTimestamp; ts <= ev.endTimestamp; ts += ev.interval { + if err := contextDone(ctx, "expression evaluation"); err != nil { + ev.error(err) + } + + // Reset number of samples in memory after each timestamp. + ev.currentSamples = tempNumSamples + // Gather input vectors for this timestamp. + baseVector, _ = ev.gatherVector(ts, mat, baseVector, nil, nil) + infoVector, _ = ev.gatherVector(ts, infoMat, infoVector, nil, nil) + + enh.Ts = ts + result, err := ev.combineWithInfoVector(baseVector, infoVector, ignoreSeries, baseSigs, infoSigs, enh, dataLabelMatchers) + if err != nil { + ev.error(err) + } + enh.Out = result[:0] // Reuse result vector. + + vecNumSamples := result.TotalSamples() + ev.currentSamples += vecNumSamples + // When we reset currentSamples to tempNumSamples during the next iteration of the loop it also + // needs to include the samples from the result here, as they're still in memory. + tempNumSamples += vecNumSamples + ev.samplesStats.UpdatePeak(ev.currentSamples) + if ev.currentSamples > ev.maxSamples { + ev.error(ErrTooManySamples(env)) + } + + // Add samples in result vector to output series. + for _, sample := range result { + h := sample.Metric.Hash() + ss, exists := seriess[h] + if exists { + if ss.ts == ts { // If we've seen this output series before at this timestamp, it's a duplicate. + ev.errorf("vector cannot contain metrics with the same labelset") + } + ss.ts = ts + } else { + ss = seriesAndTimestamp{Series{Metric: sample.Metric}, ts} + } + addToSeries(&ss.Series, enh.Ts, sample.F, sample.H, numSteps) + seriess[h] = ss + } + } + + // Reuse the original point slices. + for _, m := range origMatrices { + for _, s := range m { + putFPointSlice(s.Floats) + putHPointSlice(s.Histograms) + } + } + // Assemble the output matrix. By the time we get here we know we don't have too many samples. + numSamples := 0 + output := make(Matrix, 0, len(seriess)) + for _, ss := range seriess { + numSamples += len(ss.Floats) + totalHPointSize(ss.Histograms) + output = append(output, ss.Series) + } + ev.currentSamples = originalNumSamples + numSamples + ev.samplesStats.UpdatePeak(ev.currentSamples) + return output, warnings +} + +// combineWithInfoVector combines base and info Vectors. +// Base series in ignoreSeries are not combined. +func (ev *evaluator) combineWithInfoVector(base, info Vector, ignoreSeries map[int]struct{}, baseSigs []map[string]string, infoSigs []string, enh *EvalNodeHelper, dataLabelMatchers map[string][]*labels.Matcher) (Vector, error) { + if len(base) == 0 { + return nil, nil // Short-circuit: nothing is going to match. + } + + // All samples from the info Vector hashed by the matching label/values. + if enh.rightSigs == nil { + enh.rightSigs = make(map[string]Sample, len(enh.Out)) + } else { + clear(enh.rightSigs) + } + + for i, s := range info { + if s.H != nil { + ev.error(errors.New("info sample should be float")) + } + // We encode original info sample timestamps via the float value. 
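combineWithInfoSeries above pairs each base sample with info samples through a signature made of the info metric's name plus the identifying labels (instance, job). The simplified sketch below builds such a join key from plain label maps; the real code builds a labels.Labels with a ScratchBuilder and uses its byte encoding instead.

package main

import "fmt"

// identifying labels the info function joins on (hard coded upstream as well).
var identifying = []string{"instance", "job"}

// signature builds the join key used to pair a base series with an info series:
// the info metric's name plus the values of the identifying labels.
func signature(infoName string, lbls map[string]string) string {
	sig := infoName
	for _, l := range identifying {
		sig += "\xff" + l + "=" + lbls[l]
	}
	return sig
}

func main() {
	base := map[string]string{"__name__": "http_requests_total", "instance": "10.0.0.1:8080", "job": "api", "path": "/users"}
	info := map[string]string{"__name__": "target_info", "instance": "10.0.0.1:8080", "job": "api", "k8s_cluster_name": "us-east"}

	// Both sides collapse to the same key, so the data labels of target_info
	// (e.g. k8s_cluster_name) can be copied onto the base sample.
	fmt.Println(signature("target_info", base) == signature(info["__name__"], info)) // true
}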
+ origT := int64(s.F) + + sig := infoSigs[i] + if existing, exists := enh.rightSigs[sig]; exists { + // We encode original info sample timestamps via the float value. + existingOrigT := int64(existing.F) + switch { + case existingOrigT > origT: + // Keep the other info sample, since it's newer. + case existingOrigT < origT: + // Keep this info sample, since it's newer. + enh.rightSigs[sig] = s + default: + // The two info samples have the same timestamp - conflict. + name := s.Metric.Map()[labels.MetricName] + ev.errorf("found duplicate series for info metric %s", name) + } + } else { + enh.rightSigs[sig] = s + } + } + + for i, bs := range base { + if _, exists := ignoreSeries[i]; exists { + // This series should not be enriched with info metric data labels. + enh.Out = append(enh.Out, Sample{ + Metric: bs.Metric, + F: bs.F, + H: bs.H, + }) + continue + } + + baseLabels := bs.Metric.Map() + enh.resetBuilder(labels.Labels{}) + + // For every info metric name, try to find an info series with the same signature. + seenInfoMetrics := map[string]struct{}{} + for infoName, sig := range baseSigs[i] { + is, exists := enh.rightSigs[sig] + if !exists { + continue + } + if _, exists := seenInfoMetrics[infoName]; exists { + continue + } + + err := is.Metric.Validate(func(l labels.Label) error { + if l.Name == labels.MetricName { + return nil + } + if _, exists := dataLabelMatchers[l.Name]; len(dataLabelMatchers) > 0 && !exists { + // Not among the specified data label matchers. + return nil + } + + if v := enh.lb.Get(l.Name); v != "" && v != l.Value { + return fmt.Errorf("conflicting label: %s", l.Name) + } + if _, exists := baseLabels[l.Name]; exists { + // Skip labels already on the base metric. + return nil + } + + enh.lb.Set(l.Name, l.Value) + return nil + }) + if err != nil { + return nil, err + } + seenInfoMetrics[infoName] = struct{}{} + } + + infoLbls := enh.lb.Labels() + if infoLbls.Len() == 0 { + // If there's at least one data label matcher not matching the empty string, + // we have to ignore this series as there are no matching info series. + allMatchersMatchEmpty := true + for _, ms := range dataLabelMatchers { + for _, m := range ms { + if !m.Matches("") { + allMatchersMatchEmpty = false + break + } + } + } + if !allMatchersMatchEmpty { + continue + } + } + + enh.resetBuilder(bs.Metric) + infoLbls.Range(func(l labels.Label) { + enh.lb.Set(l.Name, l.Value) + }) + + enh.Out = append(enh.Out, Sample{ + Metric: enh.lb.Labels(), + F: bs.F, + H: bs.H, + }) + } + return enh.Out, nil +} diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/ast.go b/vendor/github.com/prometheus/prometheus/promql/parser/ast.go index 162d7817ab0..132ef3f0d28 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/ast.go +++ b/vendor/github.com/prometheus/prometheus/promql/parser/ast.go @@ -208,6 +208,10 @@ type VectorSelector struct { UnexpandedSeriesSet storage.SeriesSet Series []storage.Series + // BypassEmptyMatcherCheck is true when the VectorSelector isn't required to have at least one matcher matching the empty string. + // This is the case when VectorSelector is used to represent the info function's second argument. 
+ BypassEmptyMatcherCheck bool + PosRange posrange.PositionRange } diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/functions.go b/vendor/github.com/prometheus/prometheus/promql/parser/functions.go index 99b41321fed..0cedfd9f1f1 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/functions.go +++ b/vendor/github.com/prometheus/prometheus/promql/parser/functions.go @@ -202,10 +202,17 @@ var Functions = map[string]*Function{ ArgTypes: []ValueType{ValueTypeScalar, ValueTypeVector}, ReturnType: ValueTypeVector, }, + "double_exponential_smoothing": { + Name: "double_exponential_smoothing", + ArgTypes: []ValueType{ValueTypeMatrix, ValueTypeScalar, ValueTypeScalar}, + ReturnType: ValueTypeVector, + Experimental: true, + }, + // Keep an alias for Mimir users using holt_winters. "holt_winters": { - Name: "holt_winters", - ArgTypes: []ValueType{ValueTypeMatrix, ValueTypeScalar, ValueTypeScalar}, - ReturnType: ValueTypeVector, + Name: "holt_winters", + ArgTypes: []ValueType{ValueTypeMatrix, ValueTypeScalar, ValueTypeScalar}, + ReturnType: ValueTypeVector, }, "hour": { Name: "hour", @@ -223,6 +230,13 @@ var Functions = map[string]*Function{ ArgTypes: []ValueType{ValueTypeMatrix}, ReturnType: ValueTypeVector, }, + "info": { + Name: "info", + ArgTypes: []ValueType{ValueTypeVector, ValueTypeVector}, + ReturnType: ValueTypeVector, + Experimental: true, + Variadic: 1, + }, "irate": { Name: "irate", ArgTypes: []ValueType{ValueTypeMatrix}, diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/parse.go b/vendor/github.com/prometheus/prometheus/promql/parser/parse.go index 6dda2705297..3f50cf2b8ac 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/parse.go +++ b/vendor/github.com/prometheus/prometheus/promql/parser/parse.go @@ -786,6 +786,19 @@ func (p *parser) checkAST(node Node) (typ ValueType) { } } + if n.Func.Name == "info" && len(n.Args) > 1 { + // Check the type is correct first + if n.Args[1].Type() != ValueTypeVector { + p.addParseErrf(node.PositionRange(), "expected type %s in %s, got %s", DocumentedType(ValueTypeVector), fmt.Sprintf("call to function %q", n.Func.Name), DocumentedType(n.Args[1].Type())) + } + // Check the vector selector in the input doesn't contain a metric name + if n.Args[1].(*VectorSelector).Name != "" { + p.addParseErrf(n.Args[1].PositionRange(), "expected label selectors only, got vector selector instead") + } + // Set Vector Selector flag to bypass empty matcher check + n.Args[1].(*VectorSelector).BypassEmptyMatcherCheck = true + } + for i, arg := range n.Args { if i >= len(n.Func.ArgTypes) { if n.Func.Variadic == 0 { @@ -832,17 +845,19 @@ func (p *parser) checkAST(node Node) (typ ValueType) { // metric name is a non-empty matcher. break } - // A Vector selector must contain at least one non-empty matcher to prevent - // implicit selection of all metrics (e.g. by a typo). - notEmpty := false - for _, lm := range n.LabelMatchers { - if lm != nil && !lm.Matches("") { - notEmpty = true - break + if !n.BypassEmptyMatcherCheck { + // A Vector selector must contain at least one non-empty matcher to prevent + // implicit selection of all metrics (e.g. by a typo). 
+ notEmpty := false + for _, lm := range n.LabelMatchers { + if lm != nil && !lm.Matches("") { + notEmpty = true + break + } + } + if !notEmpty { + p.addParseErrf(n.PositionRange(), "vector selector must contain at least one non-empty matcher") } - } - if !notEmpty { - p.addParseErrf(n.PositionRange(), "vector selector must contain at least one non-empty matcher") } case *NumberLiteral, *StringLiteral: diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/test.go b/vendor/github.com/prometheus/prometheus/promql/promqltest/test.go index f0649a77a82..e078bcb60bb 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/test.go +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/test.go @@ -39,6 +39,7 @@ import ( "github.com/prometheus/prometheus/promql/parser/posrange" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/util/almost" + "github.com/prometheus/prometheus/util/convertnhcb" "github.com/prometheus/prometheus/util/teststorage" "github.com/prometheus/prometheus/util/testutil" ) @@ -46,8 +47,8 @@ import ( var ( patSpace = regexp.MustCompile("[\t ]+") patLoad = regexp.MustCompile(`^load(?:_(with_nhcb))?\s+(.+?)$`) - patEvalInstant = regexp.MustCompile(`^eval(?:_(fail|warn|ordered))?\s+instant\s+(?:at\s+(.+?))?\s+(.+)$`) - patEvalRange = regexp.MustCompile(`^eval(?:_(fail|warn))?\s+range\s+from\s+(.+)\s+to\s+(.+)\s+step\s+(.+?)\s+(.+)$`) + patEvalInstant = regexp.MustCompile(`^eval(?:_(fail|warn|ordered|info))?\s+instant\s+(?:at\s+(.+?))?\s+(.+)$`) + patEvalRange = regexp.MustCompile(`^eval(?:_(fail|warn|info))?\s+range\s+from\s+(.+)\s+to\s+(.+)\s+step\s+(.+?)\s+(.+)$`) ) const ( @@ -321,6 +322,8 @@ func (t *test) parseEval(lines []string, i int) (int, *evalCmd, error) { cmd.fail = true case "warn": cmd.warn = true + case "info": + cmd.info = true } for j := 1; i+1 < len(lines); j++ { @@ -477,43 +480,22 @@ func (cmd *loadCmd) append(a storage.Appender) error { return nil } -func getHistogramMetricBase(m labels.Labels, suffix string) (labels.Labels, uint64) { - mName := m.Get(labels.MetricName) - baseM := labels.NewBuilder(m). - Set(labels.MetricName, strings.TrimSuffix(mName, suffix)). - Del(labels.BucketLabel). 
- Labels() - hash := baseM.Hash() - return baseM, hash -} - type tempHistogramWrapper struct { metric labels.Labels upperBounds []float64 - histogramByTs map[int64]tempHistogram + histogramByTs map[int64]convertnhcb.TempHistogram } func newTempHistogramWrapper() tempHistogramWrapper { return tempHistogramWrapper{ upperBounds: []float64{}, - histogramByTs: map[int64]tempHistogram{}, + histogramByTs: map[int64]convertnhcb.TempHistogram{}, } } -type tempHistogram struct { - bucketCounts map[float64]float64 - count float64 - sum float64 -} - -func newTempHistogram() tempHistogram { - return tempHistogram{ - bucketCounts: map[float64]float64{}, - } -} - -func processClassicHistogramSeries(m labels.Labels, suffix string, histogramMap map[uint64]tempHistogramWrapper, smpls []promql.Sample, updateHistogramWrapper func(*tempHistogramWrapper), updateHistogram func(*tempHistogram, float64)) { - m2, m2hash := getHistogramMetricBase(m, suffix) +func processClassicHistogramSeries(m labels.Labels, suffix string, histogramMap map[uint64]tempHistogramWrapper, smpls []promql.Sample, updateHistogramWrapper func(*tempHistogramWrapper), updateHistogram func(*convertnhcb.TempHistogram, float64)) { + m2 := convertnhcb.GetHistogramMetricBase(m, suffix) + m2hash := m2.Hash() histogramWrapper, exists := histogramMap[m2hash] if !exists { histogramWrapper = newTempHistogramWrapper() @@ -528,7 +510,7 @@ func processClassicHistogramSeries(m labels.Labels, suffix string, histogramMap } histogram, exists := histogramWrapper.histogramByTs[s.T] if !exists { - histogram = newTempHistogram() + histogram = convertnhcb.NewTempHistogram() } updateHistogram(&histogram, s.F) histogramWrapper.histogramByTs[s.T] = histogram @@ -536,34 +518,6 @@ func processClassicHistogramSeries(m labels.Labels, suffix string, histogramMap histogramMap[m2hash] = histogramWrapper } -func processUpperBoundsAndCreateBaseHistogram(upperBounds0 []float64) ([]float64, *histogram.FloatHistogram) { - sort.Float64s(upperBounds0) - upperBounds := make([]float64, 0, len(upperBounds0)) - prevLE := math.Inf(-1) - for _, le := range upperBounds0 { - if le != prevLE { // deduplicate - upperBounds = append(upperBounds, le) - prevLE = le - } - } - var customBounds []float64 - if upperBounds[len(upperBounds)-1] == math.Inf(1) { - customBounds = upperBounds[:len(upperBounds)-1] - } else { - customBounds = upperBounds - } - return upperBounds, &histogram.FloatHistogram{ - Count: 0, - Sum: 0, - Schema: histogram.CustomBucketsSchema, - PositiveSpans: []histogram.Span{ - {Offset: 0, Length: uint32(len(upperBounds))}, - }, - PositiveBuckets: make([]float64, len(upperBounds)), - CustomValues: customBounds, - } -} - // If classic histograms are defined, convert them into native histograms with custom // bounds and append the defined time series to the storage. 
func (cmd *loadCmd) appendCustomHistogram(a storage.Appender) error { @@ -582,16 +536,16 @@ func (cmd *loadCmd) appendCustomHistogram(a storage.Appender) error { } processClassicHistogramSeries(m, "_bucket", histogramMap, smpls, func(histogramWrapper *tempHistogramWrapper) { histogramWrapper.upperBounds = append(histogramWrapper.upperBounds, le) - }, func(histogram *tempHistogram, f float64) { - histogram.bucketCounts[le] = f + }, func(histogram *convertnhcb.TempHistogram, f float64) { + histogram.BucketCounts[le] = f }) case strings.HasSuffix(mName, "_count"): - processClassicHistogramSeries(m, "_count", histogramMap, smpls, nil, func(histogram *tempHistogram, f float64) { - histogram.count = f + processClassicHistogramSeries(m, "_count", histogramMap, smpls, nil, func(histogram *convertnhcb.TempHistogram, f float64) { + histogram.Count = f }) case strings.HasSuffix(mName, "_sum"): - processClassicHistogramSeries(m, "_sum", histogramMap, smpls, nil, func(histogram *tempHistogram, f float64) { - histogram.sum = f + processClassicHistogramSeries(m, "_sum", histogramMap, smpls, nil, func(histogram *convertnhcb.TempHistogram, f float64) { + histogram.Sum = f }) } } @@ -599,30 +553,21 @@ func (cmd *loadCmd) appendCustomHistogram(a storage.Appender) error { // Convert the collated classic histogram data into native histograms // with custom bounds and append them to the storage. for _, histogramWrapper := range histogramMap { - upperBounds, fhBase := processUpperBoundsAndCreateBaseHistogram(histogramWrapper.upperBounds) + upperBounds, hBase := convertnhcb.ProcessUpperBoundsAndCreateBaseHistogram(histogramWrapper.upperBounds, true) + fhBase := hBase.ToFloat(nil) samples := make([]promql.Sample, 0, len(histogramWrapper.histogramByTs)) for t, histogram := range histogramWrapper.histogramByTs { - fh := fhBase.Copy() - var prevCount, total float64 - for i, le := range upperBounds { - currCount, exists := histogram.bucketCounts[le] - if !exists { - currCount = 0 + h, fh := convertnhcb.NewHistogram(histogram, upperBounds, hBase, fhBase) + if fh == nil { + if err := h.Validate(); err != nil { + return err } - count := currCount - prevCount - fh.PositiveBuckets[i] = count - total += count - prevCount = currCount - } - fh.Sum = histogram.sum - if histogram.count != 0 { - total = histogram.count + fh = h.ToFloat(nil) } - fh.Count = total - s := promql.Sample{T: t, H: fh.Compact(0)} - if err := s.H.Validate(); err != nil { + if err := fh.Validate(); err != nil { return err } + s := promql.Sample{T: t, H: fh} samples = append(samples, s) } sort.Slice(samples, func(i, j int) bool { return samples[i].T < samples[j].T }) @@ -657,10 +602,10 @@ type evalCmd struct { step time.Duration line int - isRange bool // if false, instant query - fail, warn, ordered bool - expectedFailMessage string - expectedFailRegexp *regexp.Regexp + isRange bool // if false, instant query + fail, warn, ordered, info bool + expectedFailMessage string + expectedFailRegexp *regexp.Regexp metrics map[uint64]labels.Labels expectScalar bool @@ -1208,13 +1153,16 @@ func (t *test) runInstantQuery(iq atModifierTestCase, cmd *evalCmd, engine promq if res.Err == nil && cmd.fail { return fmt.Errorf("expected error evaluating query %q (line %d) but got none", iq.expr, cmd.line) } - countWarnings, _ := res.Warnings.CountWarningsAndInfo() + countWarnings, countInfo := res.Warnings.CountWarningsAndInfo() if !cmd.warn && countWarnings > 0 { return fmt.Errorf("unexpected warnings evaluating query %q (line %d): %v", iq.expr, cmd.line, res.Warnings) } if 
cmd.warn && countWarnings == 0 { return fmt.Errorf("expected warnings evaluating query %q (line %d) but got none", iq.expr, cmd.line) } + if cmd.info && countInfo == 0 { + return fmt.Errorf("expected info annotations evaluating query %q (line %d) but got none", iq.expr, cmd.line) + } err = cmd.compareResult(res.Value) if err != nil { return fmt.Errorf("error in %s %s (line %d): %w", cmd, iq.expr, cmd.line, err) diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/aggregators.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/aggregators.test index 68d2e735b37..e2eb381dbcb 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/aggregators.test +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/aggregators.test @@ -250,7 +250,7 @@ clear load 5m http_requests{job="api-server", instance="0", group="production"} 0+10x10 http_requests{job="api-server", instance="1", group="production"} 0+20x10 - http_requests{job="api-server", instance="2", group="production"} NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN + http_requests{job="api-server", instance="2", group="production"} NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN http_requests{job="api-server", instance="0", group="canary"} 0+30x10 http_requests{job="api-server", instance="1", group="canary"} 0+40x10 http_requests{job="app-server", instance="0", group="production"} 0+50x10 @@ -337,32 +337,32 @@ load 5m version{job="app-server", instance="0", group="canary"} 7 version{job="app-server", instance="1", group="canary"} 7 -eval instant at 5m count_values("version", version) +eval instant at 1m count_values("version", version) {version="6"} 5 {version="7"} 2 {version="8"} 2 -eval instant at 5m count_values(((("version"))), version) +eval instant at 1m count_values(((("version"))), version) {version="6"} 5 {version="7"} 2 {version="8"} 2 -eval instant at 5m count_values without (instance)("version", version) +eval instant at 1m count_values without (instance)("version", version) {job="api-server", group="production", version="6"} 3 {job="api-server", group="canary", version="8"} 2 {job="app-server", group="production", version="6"} 2 {job="app-server", group="canary", version="7"} 2 # Overwrite label with output. Don't do this. -eval instant at 5m count_values without (instance)("job", version) +eval instant at 1m count_values without (instance)("job", version) {job="6", group="production"} 5 {job="8", group="canary"} 2 {job="7", group="canary"} 2 # Overwrite label with output. Don't do this. -eval instant at 5m count_values by (job, group)("job", version) +eval instant at 1m count_values by (job, group)("job", version) {job="6", group="production"} 5 {job="8", group="canary"} 2 {job="7", group="canary"} 2 @@ -572,3 +572,160 @@ clear # #eval instant at 1m count(topk(1,max(up) without()) == topk(1,max(up) without()) == topk(1,max(up) without()) == topk(1,max(up) without()) == topk(1,max(up) without())) # {} 1 + +clear + +# Test stddev produces consistent results regardless the order the data is loaded in. 
+load 5m + series{label="a"} 1 + series{label="b"} 2 + series{label="c"} {{schema:1 sum:15 count:10 buckets:[3 2 5 7 9]}} + +eval instant at 0m stddev(series) + {} 0.5 + +eval instant at 0m stdvar(series) + {} 0.25 + +eval instant at 0m stddev by (label) (series) + {label="a"} 0 + {label="b"} 0 + +eval instant at 0m stdvar by (label) (series) + {label="a"} 0 + {label="b"} 0 + +clear + +load 5m + series{label="a"} {{schema:1 sum:15 count:10 buckets:[3 2 5 7 9]}} + series{label="b"} 1 + series{label="c"} 2 + +eval instant at 0m stddev(series) + {} 0.5 + +eval instant at 0m stdvar(series) + {} 0.25 + +eval instant at 0m stddev by (label) (series) + {label="b"} 0 + {label="c"} 0 + +eval instant at 0m stdvar by (label) (series) + {label="b"} 0 + {label="c"} 0 + +clear + +load 5m + series{label="a"} 1 + series{label="b"} 2 + series{label="c"} NaN + +eval instant at 0m stddev(series) + {} NaN + +eval instant at 0m stdvar(series) + {} NaN + +eval instant at 0m stddev by (label) (series) + {label="a"} 0 + {label="b"} 0 + {label="c"} NaN + +eval instant at 0m stdvar by (label) (series) + {label="a"} 0 + {label="b"} 0 + {label="c"} NaN + +clear + +load 5m + series{label="a"} NaN + series{label="b"} 1 + series{label="c"} 2 + +eval instant at 0m stddev(series) + {} NaN + +eval instant at 0m stdvar(series) + {} NaN + +eval instant at 0m stddev by (label) (series) + {label="a"} NaN + {label="b"} 0 + {label="c"} 0 + +eval instant at 0m stdvar by (label) (series) + {label="a"} NaN + {label="b"} 0 + {label="c"} 0 + +clear + +load 5m + series NaN + +eval instant at 0m stddev(series) + {} NaN + +eval instant at 0m stdvar(series) + {} NaN + +clear + +load 5m + series{label="a"} 1 + series{label="b"} 2 + series{label="c"} inf + +eval instant at 0m stddev (series) + {} NaN + +eval instant at 0m stdvar (series) + {} NaN + +eval instant at 0m stddev by (label) (series) + {label="a"} 0 + {label="b"} 0 + {label="c"} NaN + +eval instant at 0m stdvar by (label) (series) + {label="a"} 0 + {label="b"} 0 + {label="c"} NaN + +clear + +load 5m + series{label="a"} inf + series{label="b"} 1 + series{label="c"} 2 + +eval instant at 0m stddev(series) + {} NaN + +eval instant at 0m stdvar(series) + {} NaN + +eval instant at 0m stddev by (label) (series) + {label="a"} NaN + {label="b"} 0 + {label="c"} 0 + +eval instant at 0m stdvar by (label) (series) + {label="a"} NaN + {label="b"} 0 + {label="c"} 0 + +clear + +load 5m + series inf + +eval instant at 0m stddev(series) + {} NaN + +eval instant at 0m stdvar(series) + {} NaN diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/at_modifier.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/at_modifier.test index 35f90ee6714..4091f7eabf2 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/at_modifier.test +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/at_modifier.test @@ -121,45 +121,43 @@ eval instant at 25s sum_over_time(metric{job="1"}[100:1] offset 20 @ 100) # Since vector selector has timestamp, the result value does not depend on the timestamp of subqueries. # Inner most sum=1+2+...+10=55. -# With [100s:25s] subquery, it's 55*5. +# With [100s:25s] subquery, it's 55*4. eval instant at 100s sum_over_time(sum_over_time(metric{job="1"}[100s] @ 100)[100s:25s] @ 50) - {job="1"} 275 + {job="1"} 220 # Nested subqueries with different timestamps on both. # Since vector selector has timestamp, the result value does not depend on the timestamp of subqueries. 
-# Sum of innermost subquery is 275 as above. The outer subquery repeats it 4 times. +# Sum of innermost subquery is 220 as above. The outer subquery repeats it 3 times. eval instant at 0s sum_over_time(sum_over_time(sum_over_time(metric{job="1"}[100s] @ 100)[100s:25s] @ 50)[3s:1s] @ 3000) - {job="1"} 1100 + {job="1"} 660 # Testing the inner subquery timestamp since vector selector does not have @. # Inner sum for subquery [100s:25s] @ 50 are -# at -50 nothing, at -25 nothing, at 0=0, at 25=2, at 50=4+5=9. -# This sum of 11 is repeated 4 times by outer subquery. +# at -50 nothing, at -25 nothing, at 0=0, at 25=2, at 50=5. +# This sum of 7 is repeated 3 times by outer subquery. eval instant at 0s sum_over_time(sum_over_time(sum_over_time(metric{job="1"}[10s])[100s:25s] @ 50)[3s:1s] @ 200) - {job="1"} 44 + {job="1"} 21 # Inner sum for subquery [100s:25s] @ 200 are -# at 100=9+10, at 125=12, at 150=14+15, at 175=17, at 200=19+20. -# This sum of 116 is repeated 4 times by outer subquery. +# at 125=12, at 150=15, at 175=17, at 200=20. +# This sum of 64 is repeated 3 times by outer subquery. eval instant at 0s sum_over_time(sum_over_time(sum_over_time(metric{job="1"}[10s])[100s:25s] @ 200)[3s:1s] @ 50) - {job="1"} 464 + {job="1"} 192 # Nested subqueries with timestamp only on outer subquery. # Outer most subquery: -# at 900=783 -# inner subquery: at 870=87+86+85, at 880=88+87+86, at 890=89+88+87 -# at 925=537 -# inner subquery: at 895=89+88, at 905=90+89, at 915=90+91 -# at 950=828 -# inner subquery: at 920=92+91+90, at 930=93+92+91, at 940=94+93+92 -# at 975=567 -# inner subquery: at 945=94+93, at 955=95+94, at 965=96+95 -# at 1000=873 -# inner subquery: at 970=97+96+95, at 980=98+97+96, at 990=99+98+97 +# at 925=360 +# inner subquery: at 905=90+89, at 915=91+90 +# at 950=372 +# inner subquery: at 930=93+92, at 940=94+93 +# at 975=380 +# inner subquery: at 955=95+94, at 965=96+95 +# at 1000=392 +# inner subquery: at 980=98+97, at 990=99+98 eval instant at 0s sum_over_time(sum_over_time(sum_over_time(metric{job="1"}[20s])[20s:10s] offset 10s)[100s:25s] @ 1000) - {job="1"} 3588 + {job="1"} 1504 # minute is counted on the value of the sample. eval instant at 10s minute(metric @ 1500) @@ -182,32 +180,32 @@ eval instant at 15m timestamp(timestamp(metric{job="1"} @ 10)) # minute is counted on the value of the sample. eval instant at 0s sum_over_time(minute(metric @ 1500)[100s:10s]) - {job="1"} 22 - {job="2"} 55 + {job="1"} 20 + {job="2"} 50 # If nothing passed, minute() takes eval time. # Here the eval time is determined by the subquery. # [50m:1m] at 6000, i.e. 100m, is 50m to 100m. -# sum=50+51+52+...+59+0+1+2+...+40. +# sum=51+52+...+59+0+1+2+...+40. eval instant at 0s sum_over_time(minute()[50m:1m] @ 6000) - {} 1365 + {} 1315 -# sum=45+46+47+...+59+0+1+2+...+35. +# sum=46+47+...+59+0+1+2+...+35. eval instant at 0s sum_over_time(minute()[50m:1m] @ 6000 offset 5m) - {} 1410 + {} 1365 # time() is the eval time which is determined by subquery here. -# 2900+2901+...+3000 = (3000*3001 - 2899*2900)/2. +# 2901+...+3000 = (3000*3001 - 2899*2900)/2. eval instant at 0s sum_over_time(vector(time())[100s:1s] @ 3000) - {} 297950 + {} 295050 -# 2300+2301+...+2400 = (2400*2401 - 2299*2300)/2. +# 2301+...+2400 = (2400*2401 - 2299*2300)/2. eval instant at 0s sum_over_time(vector(time())[100s:1s] @ 3000 offset 600s) - {} 237350 + {} 235050 # timestamp() takes the time of the sample and not the evaluation time. 
eval instant at 0s sum_over_time(timestamp(metric{job="1"} @ 10)[100s:10s] @ 3000) - {job="1"} 110 + {job="1"} 100 # The result of inner timestamp() will have the timestamp as the # eval time, hence entire expression is not step invariant and depends on eval time. diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/functions.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/functions.test index 6e2b3630bcb..fb1d1696244 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/functions.test +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/functions.test @@ -6,11 +6,13 @@ load 5m # Tests for resets(). eval instant at 50m resets(http_requests[5m]) + +eval instant at 50m resets(http_requests[10m]) {path="/foo"} 0 {path="/bar"} 0 {path="/biz"} 0 -eval instant at 50m resets(http_requests[300]) +eval instant at 50m resets(http_requests[600]) {path="/foo"} 0 {path="/bar"} 0 {path="/biz"} 0 @@ -21,6 +23,11 @@ eval instant at 50m resets(http_requests[20m]) {path="/biz"} 0 eval instant at 50m resets(http_requests[30m]) + {path="/foo"} 1 + {path="/bar"} 0 + {path="/biz"} 0 + +eval instant at 50m resets(http_requests[32m]) {path="/foo"} 2 {path="/bar"} 1 {path="/biz"} 0 @@ -34,28 +41,30 @@ eval instant at 50m resets(nonexistent_metric[50m]) # Tests for changes(). eval instant at 50m changes(http_requests[5m]) + +eval instant at 50m changes(http_requests[6m]) {path="/foo"} 0 {path="/bar"} 0 {path="/biz"} 0 eval instant at 50m changes(http_requests[20m]) - {path="/foo"} 3 - {path="/bar"} 3 + {path="/foo"} 2 + {path="/bar"} 2 {path="/biz"} 0 eval instant at 50m changes(http_requests[30m]) - {path="/foo"} 4 - {path="/bar"} 5 - {path="/biz"} 1 + {path="/foo"} 3 + {path="/bar"} 4 + {path="/biz"} 0 eval instant at 50m changes(http_requests[50m]) - {path="/foo"} 8 - {path="/bar"} 9 + {path="/foo"} 7 + {path="/bar"} 8 {path="/biz"} 1 eval instant at 50m changes((http_requests[50m])) - {path="/foo"} 8 - {path="/bar"} 9 + {path="/foo"} 7 + {path="/bar"} 8 {path="/biz"} 1 eval instant at 50m changes(nonexistent_metric[50m]) @@ -66,7 +75,7 @@ load 5m x{a="b"} NaN NaN NaN x{a="c"} 0 NaN 0 -eval instant at 15m changes(x[15m]) +eval instant at 15m changes(x[20m]) {a="b"} 0 {a="c"} 2 @@ -75,14 +84,14 @@ clear # Tests for increase(). load 5m http_requests{path="/foo"} 0+10x10 - http_requests{path="/bar"} 0+10x5 0+10x5 + http_requests{path="/bar"} 0+18x5 0+18x5 http_requests{path="/dings"} 10+10x10 http_requests{path="/bumms"} 1+10x10 # Tests for increase(). eval instant at 50m increase(http_requests[50m]) {path="/foo"} 100 - {path="/bar"} 90 + {path="/bar"} 160 {path="/dings"} 100 {path="/bumms"} 100 @@ -95,7 +104,7 @@ eval instant at 50m increase(http_requests[50m]) # value, and therefore the extrapolation happens only by 30s. eval instant at 50m increase(http_requests[100m]) {path="/foo"} 100 - {path="/bar"} 90 + {path="/bar"} 162 {path="/dings"} 105 {path="/bumms"} 101 @@ -115,15 +124,17 @@ clear # Tests for rate(). load 5m - testcounter_reset_middle 0+10x4 0+10x5 + testcounter_reset_middle 0+27x4 0+27x5 testcounter_reset_end 0+10x9 0 10 # Counter resets at in the middle of range are handled correctly by rate(). eval instant at 50m rate(testcounter_reset_middle[50m]) - {} 0.03 + {} 0.08 # Counter resets at end of range are ignored by rate(). 
eval instant at 50m rate(testcounter_reset_end[5m]) + +eval instant at 50m rate(testcounter_reset_end[6m]) {} 0 clear @@ -242,24 +253,24 @@ eval instant at 50m deriv(testcounter_reset_middle[100m]) # intercept at t=3000: 38.63636363636364 # intercept at t=3000+3600: 76.81818181818181 eval instant at 50m predict_linear(testcounter_reset_middle[50m], 3600) - {} 76.81818181818181 + {} 70 eval instant at 50m predict_linear(testcounter_reset_middle[50m], 1h) - {} 76.81818181818181 + {} 70 # intercept at t = 3000+3600 = 6600 -eval instant at 50m predict_linear(testcounter_reset_middle[50m] @ 3000, 3600) +eval instant at 50m predict_linear(testcounter_reset_middle[55m] @ 3000, 3600) {} 76.81818181818181 -eval instant at 50m predict_linear(testcounter_reset_middle[50m] @ 3000, 1h) +eval instant at 50m predict_linear(testcounter_reset_middle[55m] @ 3000, 1h) {} 76.81818181818181 # intercept at t = 600+3600 = 4200 -eval instant at 10m predict_linear(testcounter_reset_middle[50m] @ 3000, 3600) +eval instant at 10m predict_linear(testcounter_reset_middle[55m] @ 3000, 3600) {} 51.36363636363637 # intercept at t = 4200+3600 = 7800 -eval instant at 70m predict_linear(testcounter_reset_middle[50m] @ 3000, 3600) +eval instant at 70m predict_linear(testcounter_reset_middle[55m] @ 3000, 3600) {} 89.54545454545455 # With http_requests, there is a sample value exactly at the end of @@ -467,7 +478,7 @@ load 5m http_requests{job="api-server", instance="1", group="production"} 0+20x10 http_requests{job="api-server", instance="0", group="canary"} 0+30x10 http_requests{job="api-server", instance="1", group="canary"} 0+40x10 - http_requests{job="api-server", instance="2", group="canary"} NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN + http_requests{job="api-server", instance="2", group="canary"} NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN http_requests{job="app-server", instance="0", group="production"} 0+50x10 http_requests{job="app-server", instance="1", group="production"} 0+60x10 http_requests{job="app-server", instance="0", group="canary"} 0+70x10 @@ -502,7 +513,7 @@ load 5m http_requests{job="api-server", instance="1", group="production"} 0+20x10 http_requests{job="api-server", instance="0", group="canary"} 0+30x10 http_requests{job="api-server", instance="1", group="canary"} 0+40x10 - http_requests{job="api-server", instance="2", group="canary"} NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN + http_requests{job="api-server", instance="2", group="canary"} NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN http_requests{job="app-server", instance="0", group="production"} 0+50x10 http_requests{job="app-server", instance="1", group="production"} 0+60x10 http_requests{job="app-server", instance="0", group="canary"} 0+70x10 @@ -640,7 +651,7 @@ eval_ordered instant at 50m sort_by_label(node_uname_info, "release") node_uname_info{job="node_exporter", instance="4m5", release="1.11.3"} 100 node_uname_info{job="node_exporter", instance="4m1000", release="1.111.3"} 100 -# Tests for holt_winters +# Tests for double_exponential_smoothing clear # positive trends @@ -650,7 +661,7 @@ load 10s http_requests{job="api-server", instance="0", group="canary"} 0+30x1000 300+80x1000 http_requests{job="api-server", instance="1", group="canary"} 0+40x2000 -eval instant at 8000s holt_winters(http_requests[1m], 0.01, 0.1) +eval instant at 8000s double_exponential_smoothing(http_requests[1m], 0.01, 0.1) {job="api-server", instance="0", group="production"} 8000 {job="api-server", instance="1", group="production"} 16000 {job="api-server", instance="0", 
group="canary"} 24000 @@ -664,7 +675,7 @@ load 10s http_requests{job="api-server", instance="0", group="canary"} 0+30x1000 300-80x1000 http_requests{job="api-server", instance="1", group="canary"} 0-40x1000 0+40x1000 -eval instant at 8000s holt_winters(http_requests[1m], 0.01, 0.1) +eval instant at 8000s double_exponential_smoothing(http_requests[1m], 0.01, 0.1) {job="api-server", instance="0", group="production"} 0 {job="api-server", instance="1", group="production"} -16000 {job="api-server", instance="0", group="canary"} 24000 @@ -688,10 +699,10 @@ load 10s metric9 -9.988465674311579e+307 -9.988465674311579e+307 -9.988465674311579e+307 metric10 -9.988465674311579e+307 9.988465674311579e+307 -eval instant at 1m avg_over_time(metric[1m]) +eval instant at 55s avg_over_time(metric[1m]) {} 3 -eval instant at 1m sum_over_time(metric[1m])/count_over_time(metric[1m]) +eval instant at 55s sum_over_time(metric[1m])/count_over_time(metric[1m]) {} 3 eval instant at 1m avg_over_time(metric2[1m]) @@ -758,8 +769,8 @@ eval instant at 1m avg_over_time(metric8[1m]) {} 9.988465674311579e+307 # This overflows float64. -eval instant at 1m sum_over_time(metric8[1m])/count_over_time(metric8[1m]) - {} Inf +eval instant at 1m sum_over_time(metric8[2m])/count_over_time(metric8[2m]) + {} +Inf eval instant at 1m avg_over_time(metric9[1m]) {} -9.988465674311579e+307 @@ -768,10 +779,16 @@ eval instant at 1m avg_over_time(metric9[1m]) eval instant at 1m sum_over_time(metric9[1m])/count_over_time(metric9[1m]) {} -Inf -eval instant at 1m avg_over_time(metric10[1m]) +eval instant at 45s avg_over_time(metric10[1m]) + {} 0 + +eval instant at 1m avg_over_time(metric10[2m]) {} 0 -eval instant at 1m sum_over_time(metric10[1m])/count_over_time(metric10[1m]) +eval instant at 45s sum_over_time(metric10[1m])/count_over_time(metric10[1m]) + {} 0 + +eval instant at 1m sum_over_time(metric10[2m])/count_over_time(metric10[2m]) {} 0 # Test if very big intermediate values cause loss of detail. @@ -779,10 +796,10 @@ clear load 10s metric 1 1e100 1 -1e100 -eval instant at 1m sum_over_time(metric[1m]) +eval instant at 1m sum_over_time(metric[2m]) {} 2 -eval instant at 1m avg_over_time(metric[1m]) +eval instant at 1m avg_over_time(metric[2m]) {} 0.5 # Tests for stddev_over_time and stdvar_over_time. @@ -790,13 +807,13 @@ clear load 10s metric 0 8 8 2 3 -eval instant at 1m stdvar_over_time(metric[1m]) +eval instant at 1m stdvar_over_time(metric[2m]) {} 10.56 -eval instant at 1m stddev_over_time(metric[1m]) +eval instant at 1m stddev_over_time(metric[2m]) {} 3.249615 -eval instant at 1m stddev_over_time((metric[1m])) +eval instant at 1m stddev_over_time((metric[2m])) {} 3.249615 # Tests for stddev_over_time and stdvar_over_time #4927. 
@@ -826,42 +843,42 @@ load 10s data{test="three samples"} 0 1 2 data{test="uneven samples"} 0 1 4 -eval instant at 1m quantile_over_time(0, data[1m]) +eval instant at 1m quantile_over_time(0, data[2m]) {test="two samples"} 0 {test="three samples"} 0 {test="uneven samples"} 0 -eval instant at 1m quantile_over_time(0.5, data[1m]) +eval instant at 1m quantile_over_time(0.5, data[2m]) {test="two samples"} 0.5 {test="three samples"} 1 {test="uneven samples"} 1 -eval instant at 1m quantile_over_time(0.75, data[1m]) +eval instant at 1m quantile_over_time(0.75, data[2m]) {test="two samples"} 0.75 {test="three samples"} 1.5 {test="uneven samples"} 2.5 -eval instant at 1m quantile_over_time(0.8, data[1m]) +eval instant at 1m quantile_over_time(0.8, data[2m]) {test="two samples"} 0.8 {test="three samples"} 1.6 {test="uneven samples"} 2.8 -eval instant at 1m quantile_over_time(1, data[1m]) +eval instant at 1m quantile_over_time(1, data[2m]) {test="two samples"} 1 {test="three samples"} 2 {test="uneven samples"} 4 -eval_warn instant at 1m quantile_over_time(-1, data[1m]) +eval_warn instant at 1m quantile_over_time(-1, data[2m]) {test="two samples"} -Inf {test="three samples"} -Inf {test="uneven samples"} -Inf -eval_warn instant at 1m quantile_over_time(2, data[1m]) +eval_warn instant at 1m quantile_over_time(2, data[2m]) {test="two samples"} +Inf {test="three samples"} +Inf {test="uneven samples"} +Inf -eval_warn instant at 1m (quantile_over_time(2, (data[1m]))) +eval_warn instant at 1m (quantile_over_time(2, (data[2m]))) {test="two samples"} +Inf {test="three samples"} +Inf {test="uneven samples"} +Inf @@ -969,21 +986,21 @@ load 10s data{type="some_nan3"} NaN 0 1 data{type="only_nan"} NaN NaN NaN -eval instant at 1m min_over_time(data[1m]) +eval instant at 1m min_over_time(data[2m]) {type="numbers"} 0 {type="some_nan"} 0 {type="some_nan2"} 1 {type="some_nan3"} 0 {type="only_nan"} NaN -eval instant at 1m max_over_time(data[1m]) +eval instant at 1m max_over_time(data[2m]) {type="numbers"} 3 {type="some_nan"} 2 {type="some_nan2"} 2 {type="some_nan3"} 1 {type="only_nan"} NaN -eval instant at 1m last_over_time(data[1m]) +eval instant at 1m last_over_time(data[2m]) data{type="numbers"} 3 data{type="some_nan"} NaN data{type="some_nan2"} 1 @@ -1076,13 +1093,19 @@ eval instant at 1m absent_over_time(httpd_log_lines_total[30s]) {} 1 eval instant at 15m absent_over_time(http_requests[5m]) - -eval instant at 16m absent_over_time(http_requests[5m]) {} 1 +eval instant at 15m absent_over_time(http_requests[10m]) + eval instant at 16m absent_over_time(http_requests[6m]) + {} 1 + +eval instant at 16m absent_over_time(http_requests[16m]) eval instant at 16m absent_over_time(httpd_handshake_failures_total[1m]) + {} 1 + +eval instant at 16m absent_over_time(httpd_handshake_failures_total[2m]) eval instant at 16m absent_over_time({instance="127.0.0.1"}[5m]) @@ -1138,17 +1161,18 @@ eval instant at 0m present_over_time(httpd_log_lines_total[30s]) eval instant at 1m present_over_time(httpd_log_lines_total[30s]) eval instant at 15m present_over_time(http_requests[5m]) + +eval instant at 15m present_over_time(http_requests[10m]) {instance="127.0.0.1", job="httpd", path="/bar"} 1 {instance="127.0.0.1", job="httpd", path="/foo"} 1 -eval instant at 16m present_over_time(http_requests[5m]) - eval instant at 16m present_over_time(http_requests[6m]) + +eval instant at 16m present_over_time(http_requests[16m]) {instance="127.0.0.1", job="httpd", path="/bar"} 1 {instance="127.0.0.1", job="httpd", path="/foo"} 1 eval instant at 16m 
present_over_time(httpd_handshake_failures_total[1m]) - {instance="127.0.0.1", job="node"} 1 eval instant at 16m present_over_time({instance="127.0.0.1"}[5m]) {instance="127.0.0.1",job="node"} 1 @@ -1169,59 +1193,59 @@ load 5m exp_root_log{l="x"} 10 exp_root_log{l="y"} 20 -eval instant at 5m exp(exp_root_log) +eval instant at 1m exp(exp_root_log) {l="x"} 22026.465794806718 {l="y"} 485165195.4097903 -eval instant at 5m exp(exp_root_log - 10) +eval instant at 1m exp(exp_root_log - 10) {l="y"} 22026.465794806718 {l="x"} 1 -eval instant at 5m exp(exp_root_log - 20) +eval instant at 1m exp(exp_root_log - 20) {l="x"} 4.5399929762484854e-05 {l="y"} 1 -eval instant at 5m ln(exp_root_log) +eval instant at 1m ln(exp_root_log) {l="x"} 2.302585092994046 {l="y"} 2.995732273553991 -eval instant at 5m ln(exp_root_log - 10) +eval instant at 1m ln(exp_root_log - 10) {l="y"} 2.302585092994046 {l="x"} -Inf -eval instant at 5m ln(exp_root_log - 20) +eval instant at 1m ln(exp_root_log - 20) {l="y"} -Inf {l="x"} NaN -eval instant at 5m exp(ln(exp_root_log)) +eval instant at 1m exp(ln(exp_root_log)) {l="y"} 20 {l="x"} 10 -eval instant at 5m sqrt(exp_root_log) +eval instant at 1m sqrt(exp_root_log) {l="x"} 3.1622776601683795 {l="y"} 4.47213595499958 -eval instant at 5m log2(exp_root_log) +eval instant at 1m log2(exp_root_log) {l="x"} 3.3219280948873626 {l="y"} 4.321928094887363 -eval instant at 5m log2(exp_root_log - 10) +eval instant at 1m log2(exp_root_log - 10) {l="y"} 3.3219280948873626 {l="x"} -Inf -eval instant at 5m log2(exp_root_log - 20) +eval instant at 1m log2(exp_root_log - 20) {l="x"} NaN {l="y"} -Inf -eval instant at 5m log10(exp_root_log) +eval instant at 1m log10(exp_root_log) {l="x"} 1 {l="y"} 1.301029995663981 -eval instant at 5m log10(exp_root_log - 10) +eval instant at 1m log10(exp_root_log - 10) {l="y"} 1 {l="x"} -Inf -eval instant at 5m log10(exp_root_log - 20) +eval instant at 1m log10(exp_root_log - 20) {l="x"} NaN {l="y"} -Inf @@ -1234,3 +1258,12 @@ load 1m # We expect the value to be 0 for t=0s to t=59s (inclusive), then 60 for t=60s and t=61s. 
eval range from 0 to 61s step 1s timestamp(metric) {} 0x59 60 60 + +clear + +# Check round with mixed data types +load 1m + mixed_metric {{schema:0 sum:5 count:4 buckets:[1 2 1]}} 1 2 3 {{schema:0 sum:5 count:4 buckets:[1 2 1]}} {{schema:0 sum:8 count:6 buckets:[1 4 1]}} + +eval range from 0 to 5m step 1m round(mixed_metric) + {} _ 1 2 3 diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/histograms.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/histograms.test index 47cba799352..6089fd01d20 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/histograms.test +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/histograms.test @@ -108,8 +108,8 @@ eval instant at 50m histogram_stdvar(testhistogram3) eval instant at 50m histogram_fraction(0, 0.2, testhistogram3) {start="positive"} 0.6363636363636364 {start="negative"} 0 - -eval instant at 50m histogram_fraction(0, 0.2, rate(testhistogram3[5m])) + +eval instant at 50m histogram_fraction(0, 0.2, rate(testhistogram3[10m])) {start="positive"} 0.6363636363636364 {start="negative"} 0 @@ -118,8 +118,8 @@ eval instant at 50m histogram_fraction(0, 0.2, rate(testhistogram3[5m])) eval instant at 50m testhistogram3_bucket{le=".2"} / ignoring(le) testhistogram3_count {start="positive"} 0.6363636363636364 - -eval instant at 50m rate(testhistogram3_bucket{le=".2"}[5m]) / ignoring(le) rate(testhistogram3_count[5m]) + +eval instant at 50m rate(testhistogram3_bucket{le=".2"}[10m]) / ignoring(le) rate(testhistogram3_count[10m]) {start="positive"} 0.6363636363636364 # Test histogram_quantile, native and classic. @@ -241,28 +241,27 @@ eval instant at 50m histogram_quantile(0.8, testhistogram_bucket) {start="negative"} 0.3 # More realistic with rates. - -eval instant at 50m histogram_quantile(0.2, rate(testhistogram[5m])) +eval instant at 50m histogram_quantile(0.2, rate(testhistogram[10m])) {start="positive"} 0.048 {start="negative"} -0.2 -eval instant at 50m histogram_quantile(0.2, rate(testhistogram_bucket[5m])) +eval instant at 50m histogram_quantile(0.2, rate(testhistogram_bucket[10m])) {start="positive"} 0.048 {start="negative"} -0.2 -eval instant at 50m histogram_quantile(0.5, rate(testhistogram[5m])) +eval instant at 50m histogram_quantile(0.5, rate(testhistogram[10m])) {start="positive"} 0.15 {start="negative"} -0.15 -eval instant at 50m histogram_quantile(0.5, rate(testhistogram_bucket[5m])) +eval instant at 50m histogram_quantile(0.5, rate(testhistogram_bucket[10m])) {start="positive"} 0.15 {start="negative"} -0.15 -eval instant at 50m histogram_quantile(0.8, rate(testhistogram[5m])) +eval instant at 50m histogram_quantile(0.8, rate(testhistogram[10m])) {start="positive"} 0.72 {start="negative"} 0.3 -eval instant at 50m histogram_quantile(0.8, rate(testhistogram_bucket[5m])) +eval instant at 50m histogram_quantile(0.8, rate(testhistogram_bucket[10m])) {start="positive"} 0.72 {start="negative"} 0.3 @@ -307,115 +306,112 @@ eval instant at 47m histogram_quantile(5./6., rate(testhistogram2_bucket[15m])) # Aggregated histogram: Everything in one. Note how native histograms # don't require aggregation by le. 
-eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[5m]))) +eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[10m]))) {} 0.075 -eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le)) +eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le)) {} 0.075 -eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[5m]))) +eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[10m]))) {} 0.1277777777777778 -eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le)) +eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le)) {} 0.1277777777777778 # Aggregated histogram: Everything in one. Now with avg, which does not change anything. -eval instant at 50m histogram_quantile(0.3, avg(rate(request_duration_seconds[5m]))) +eval instant at 50m histogram_quantile(0.3, avg(rate(request_duration_seconds[10m]))) {} 0.075 -eval instant at 50m histogram_quantile(0.3, avg(rate(request_duration_seconds_bucket[5m])) by (le)) +eval instant at 50m histogram_quantile(0.3, avg(rate(request_duration_seconds_bucket[10m])) by (le)) {} 0.075 -eval instant at 50m histogram_quantile(0.5, avg(rate(request_duration_seconds[5m]))) +eval instant at 50m histogram_quantile(0.5, avg(rate(request_duration_seconds[10m]))) {} 0.12777777777777778 -eval instant at 50m histogram_quantile(0.5, avg(rate(request_duration_seconds_bucket[5m])) by (le)) +eval instant at 50m histogram_quantile(0.5, avg(rate(request_duration_seconds_bucket[10m])) by (le)) {} 0.12777777777777778 # Aggregated histogram: By instance. -eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[5m])) by (instance)) +eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[10m])) by (instance)) {instance="ins1"} 0.075 {instance="ins2"} 0.075 -eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, instance)) +eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le, instance)) {instance="ins1"} 0.075 {instance="ins2"} 0.075 -eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[5m])) by (instance)) +eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[10m])) by (instance)) {instance="ins1"} 0.1333333333 {instance="ins2"} 0.125 -eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, instance)) +eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le, instance)) {instance="ins1"} 0.1333333333 {instance="ins2"} 0.125 # Aggregated histogram: By job. 
- -eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[5m])) by (job)) +eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[10m])) by (job)) {job="job1"} 0.1 {job="job2"} 0.0642857142857143 -eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, job)) +eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le, job)) {job="job1"} 0.1 {job="job2"} 0.0642857142857143 -eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[5m])) by (job)) +eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[10m])) by (job)) {job="job1"} 0.14 {job="job2"} 0.1125 -eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, job)) +eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le, job)) {job="job1"} 0.14 {job="job2"} 0.1125 # Aggregated histogram: By job and instance. - -eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[5m])) by (job, instance)) +eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds[10m])) by (job, instance)) {instance="ins1", job="job1"} 0.11 {instance="ins2", job="job1"} 0.09 {instance="ins1", job="job2"} 0.06 {instance="ins2", job="job2"} 0.0675 -eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[5m])) by (le, job, instance)) +eval instant at 50m histogram_quantile(0.3, sum(rate(request_duration_seconds_bucket[10m])) by (le, job, instance)) {instance="ins1", job="job1"} 0.11 {instance="ins2", job="job1"} 0.09 {instance="ins1", job="job2"} 0.06 {instance="ins2", job="job2"} 0.0675 -eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[5m])) by (job, instance)) +eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds[10m])) by (job, instance)) {instance="ins1", job="job1"} 0.15 {instance="ins2", job="job1"} 0.1333333333333333 {instance="ins1", job="job2"} 0.1 {instance="ins2", job="job2"} 0.1166666666666667 -eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[5m])) by (le, job, instance)) +eval instant at 50m histogram_quantile(0.5, sum(rate(request_duration_seconds_bucket[10m])) by (le, job, instance)) {instance="ins1", job="job1"} 0.15 {instance="ins2", job="job1"} 0.1333333333333333 {instance="ins1", job="job2"} 0.1 {instance="ins2", job="job2"} 0.1166666666666667 # The unaggregated histogram for comparison. Same result as the previous one. 
- -eval instant at 50m histogram_quantile(0.3, rate(request_duration_seconds[5m])) +eval instant at 50m histogram_quantile(0.3, rate(request_duration_seconds[10m])) {instance="ins1", job="job1"} 0.11 {instance="ins2", job="job1"} 0.09 {instance="ins1", job="job2"} 0.06 {instance="ins2", job="job2"} 0.0675 -eval instant at 50m histogram_quantile(0.3, rate(request_duration_seconds_bucket[5m])) +eval instant at 50m histogram_quantile(0.3, rate(request_duration_seconds_bucket[10m])) {instance="ins1", job="job1"} 0.11 {instance="ins2", job="job1"} 0.09 {instance="ins1", job="job2"} 0.06 {instance="ins2", job="job2"} 0.0675 -eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds[5m])) +eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds[10m])) {instance="ins1", job="job1"} 0.15 {instance="ins2", job="job1"} 0.13333333333333333 {instance="ins1", job="job2"} 0.1 {instance="ins2", job="job2"} 0.11666666666666667 -eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds_bucket[5m])) +eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds_bucket[10m])) {instance="ins1", job="job1"} 0.15 {instance="ins2", job="job1"} 0.13333333333333333 {instance="ins1", job="job2"} 0.1 @@ -425,6 +421,25 @@ eval instant at 50m histogram_quantile(0.5, rate(request_duration_seconds_bucket eval instant at 50m sum(request_duration_seconds) {} {{schema:-53 count:250 custom_values:[0.1 0.2] buckets:[100 90 60]}} +eval instant at 50m sum(request_duration_seconds{job="job1",instance="ins1"} + ignoring(job,instance) request_duration_seconds{job="job1",instance="ins2"} + ignoring(job,instance) request_duration_seconds{job="job2",instance="ins1"} + ignoring(job,instance) request_duration_seconds{job="job2",instance="ins2"}) + {} {{schema:-53 count:250 custom_values:[0.1 0.2] buckets:[100 90 60]}} + +eval instant at 50m avg(request_duration_seconds) + {} {{schema:-53 count:62.5 custom_values:[0.1 0.2] buckets:[25 22.5 15]}} + +# To verify the result above, calculate from classic histogram as well. +eval instant at 50m avg (request_duration_seconds_bucket{le="0.1"}) + {} 25 + +eval instant at 50m avg (request_duration_seconds_bucket{le="0.2"}) - avg (request_duration_seconds_bucket{le="0.1"}) + {} 22.5 + +eval instant at 50m avg (request_duration_seconds_bucket{le="+Inf"}) - avg (request_duration_seconds_bucket{le="0.2"}) + {} 15 + +eval instant at 50m count(request_duration_seconds) + {} 4 + # A histogram with nonmonotonic bucket counts. This may happen when recording # rule evaluation or federation races scrape ingestion, causing some buckets # counts to be derived from fewer samples. @@ -448,19 +463,19 @@ eval instant at 50m histogram_quantile(0.99, nonmonotonic_bucket) {} 979.75 # Buckets with different representations of the same upper bound. 
-eval instant at 50m histogram_quantile(0.5, rate(mixed_bucket[5m])) +eval instant at 50m histogram_quantile(0.5, rate(mixed_bucket[10m])) {instance="ins1", job="job1"} 0.15 {instance="ins2", job="job1"} NaN -eval instant at 50m histogram_quantile(0.5, rate(mixed[5m])) +eval instant at 50m histogram_quantile(0.5, rate(mixed[10m])) {instance="ins1", job="job1"} 0.2 {instance="ins2", job="job1"} NaN -eval instant at 50m histogram_quantile(0.75, rate(mixed_bucket[5m])) +eval instant at 50m histogram_quantile(0.75, rate(mixed_bucket[10m])) {instance="ins1", job="job1"} 0.2 {instance="ins2", job="job1"} NaN -eval instant at 50m histogram_quantile(1, rate(mixed_bucket[5m])) +eval instant at 50m histogram_quantile(1, rate(mixed_bucket[10m])) {instance="ins1", job="job1"} 0.2 {instance="ins2", job="job1"} NaN @@ -469,7 +484,7 @@ load_with_nhcb 5m empty_bucket{le="0.2", job="job1", instance="ins1"} 0x10 empty_bucket{le="+Inf", job="job1", instance="ins1"} 0x10 -eval instant at 50m histogram_quantile(0.2, rate(empty_bucket[5m])) +eval instant at 50m histogram_quantile(0.2, rate(empty_bucket[10m])) {instance="ins1", job="job1"} NaN # Load a duplicate histogram with a different name to test failure scenario on multiple histograms with the same label set. @@ -508,3 +523,36 @@ eval instant at 5m histogram_quantile(1.0, sum by (le) (rate(const_histogram_buc eval instant at 5m histogram_quantile(1.0, sum(rate(const_histogram[5m]))) {} NaN + +load_with_nhcb 1m + histogram_over_time_bucket{le="0"} 0 1 3 9 + histogram_over_time_bucket{le="1"} 2 3 3 9 + histogram_over_time_bucket{le="2"} 3 8 5 10 + histogram_over_time_bucket{le="4"} 3 10 6 18 + +# Test custom buckets with sum_over_time, avg_over_time. +eval instant at 3m sum_over_time(histogram_over_time[4m:1m]) + {} {{schema:-53 count:37 custom_values:[0 1 2 4] buckets:[13 4 9 11]}} + +eval instant at 3m avg_over_time(histogram_over_time[4m:1m]) + {} {{schema:-53 count:9.25 custom_values:[0 1 2 4] buckets:[3.25 1 2.25 2.75]}} + +# Test custom buckets with counter reset +load_with_nhcb 5m + histogram_with_reset_bucket{le="1"} 1 3 9 + histogram_with_reset_bucket{le="2"} 3 3 9 + histogram_with_reset_bucket{le="4"} 8 5 12 + histogram_with_reset_bucket{le="8"} 10 6 18 + histogram_with_reset_sum{} 36 16 61 + +eval instant at 10m increase(histogram_with_reset[15m]) + {} {{schema:-53 count:27 sum:91.5 custom_values:[1 2 4 8] counter_reset_hint:gauge buckets:[13.5 0 4.5 9]}} + +eval instant at 10m resets(histogram_with_reset[15m]) + {} 1 + +eval instant at 10m histogram_count(increase(histogram_with_reset[15m])) + {} 27 + +eval instant at 10m histogram_sum(increase(histogram_with_reset[15m])) + {} 91.5 diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/name_label_dropping.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/name_label_dropping.test index 1f1dac36021..5f5dcd5e4d1 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/name_label_dropping.test +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/name_label_dropping.test @@ -4,81 +4,81 @@ load 5m another_metric{env="1"} 60 120 180 # Does not drop __name__ for vector selector -eval instant at 15m metric{env="1"} +eval instant at 10m metric{env="1"} metric{env="1"} 120 # Drops __name__ for unary operators -eval instant at 15m -metric +eval instant at 10m -metric {env="1"} -120 # Drops __name__ for binary operators -eval instant at 15m metric + another_metric +eval instant at 10m metric + another_metric {env="1"} 
300 # Does not drop __name__ for binary comparison operators -eval instant at 15m metric <= another_metric +eval instant at 10m metric <= another_metric metric{env="1"} 120 # Drops __name__ for binary comparison operators with "bool" modifier -eval instant at 15m metric <= bool another_metric +eval instant at 10m metric <= bool another_metric {env="1"} 1 # Drops __name__ for vector-scalar operations -eval instant at 15m metric * 2 +eval instant at 10m metric * 2 {env="1"} 240 # Drops __name__ for instant-vector functions -eval instant at 15m clamp(metric, 0, 100) +eval instant at 10m clamp(metric, 0, 100) {env="1"} 100 # Drops __name__ for range-vector functions -eval instant at 15m rate(metric{env="1"}[10m]) +eval instant at 10m rate(metric{env="1"}[10m]) {env="1"} 0.2 # Does not drop __name__ for last_over_time function -eval instant at 15m last_over_time(metric{env="1"}[10m]) +eval instant at 10m last_over_time(metric{env="1"}[10m]) metric{env="1"} 120 # Drops name for other _over_time functions -eval instant at 15m max_over_time(metric{env="1"}[10m]) +eval instant at 10m max_over_time(metric{env="1"}[10m]) {env="1"} 120 # Allows relabeling (to-be-dropped) __name__ via label_replace -eval instant at 15m label_replace(rate({env="1"}[10m]), "my_name", "rate_$1", "__name__", "(.+)") +eval instant at 10m label_replace(rate({env="1"}[10m]), "my_name", "rate_$1", "__name__", "(.+)") {my_name="rate_metric", env="1"} 0.2 {my_name="rate_another_metric", env="1"} 0.2 # Allows preserving __name__ via label_replace -eval instant at 15m label_replace(rate({env="1"}[10m]), "__name__", "rate_$1", "__name__", "(.+)") +eval instant at 10m label_replace(rate({env="1"}[10m]), "__name__", "rate_$1", "__name__", "(.+)") rate_metric{env="1"} 0.2 rate_another_metric{env="1"} 0.2 # Allows relabeling (to-be-dropped) __name__ via label_join -eval instant at 15m label_join(rate({env="1"}[10m]), "my_name", "_", "__name__") +eval instant at 10m label_join(rate({env="1"}[10m]), "my_name", "_", "__name__") {my_name="metric", env="1"} 0.2 {my_name="another_metric", env="1"} 0.2 # Allows preserving __name__ via label_join -eval instant at 15m label_join(rate({env="1"}[10m]), "__name__", "_", "__name__", "env") +eval instant at 10m label_join(rate({env="1"}[10m]), "__name__", "_", "__name__", "env") metric_1{env="1"} 0.2 another_metric_1{env="1"} 0.2 # Does not drop metric names fro aggregation operators -eval instant at 15m sum by (__name__, env) (metric{env="1"}) +eval instant at 10m sum by (__name__, env) (metric{env="1"}) metric{env="1"} 120 # Aggregation operators by __name__ lead to duplicate labelset errors (aggregation is partitioned by not yet removed __name__ label) # This is an accidental side effect of delayed __name__ label dropping -eval_fail instant at 15m sum by (__name__) (rate({env="1"}[10m])) +eval_fail instant at 10m sum by (__name__) (rate({env="1"}[10m])) # Aggregation operators aggregate metrics with same labelset and to-be-dropped names # This is an accidental side effect of delayed __name__ label dropping -eval instant at 15m sum(rate({env="1"}[10m])) by (env) +eval instant at 10m sum(rate({env="1"}[10m])) by (env) {env="1"} 0.4 # Aggregationk operators propagate __name__ label dropping information -eval instant at 15m topk(10, sum by (__name__, env) (metric{env="1"})) +eval instant at 10m topk(10, sum by (__name__, env) (metric{env="1"})) metric{env="1"} 120 -eval instant at 15m topk(10, sum by (__name__, env) (rate(metric{env="1"}[10m]))) +eval instant at 10m topk(10, sum by (__name__, env) 
(rate(metric{env="1"}[10m]))) {env="1"} 0.2 diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/native_histograms.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/native_histograms.test index 7d2eec32cfa..8c5814ae8a0 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/native_histograms.test +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/native_histograms.test @@ -2,55 +2,58 @@ load 5m empty_histogram {{}} -eval instant at 5m empty_histogram +eval instant at 1m empty_histogram {__name__="empty_histogram"} {{}} -eval instant at 5m histogram_count(empty_histogram) +eval instant at 1m histogram_count(empty_histogram) {} 0 -eval instant at 5m histogram_sum(empty_histogram) +eval instant at 1m histogram_sum(empty_histogram) {} 0 -eval instant at 5m histogram_avg(empty_histogram) +eval instant at 1m histogram_avg(empty_histogram) {} NaN -eval instant at 5m histogram_fraction(-Inf, +Inf, empty_histogram) +eval instant at 1m histogram_fraction(-Inf, +Inf, empty_histogram) {} NaN -eval instant at 5m histogram_fraction(0, 8, empty_histogram) +eval instant at 1m histogram_fraction(0, 8, empty_histogram) {} NaN - +clear # buckets:[1 2 1] means 1 observation in the 1st bucket, 2 observations in the 2nd and 1 observation in the 3rd (total 4). load 5m single_histogram {{schema:0 sum:5 count:4 buckets:[1 2 1]}} # histogram_count extracts the count property from the histogram. -eval instant at 5m histogram_count(single_histogram) +eval instant at 1m histogram_count(single_histogram) {} 4 # histogram_sum extracts the sum property from the histogram. -eval instant at 5m histogram_sum(single_histogram) +eval instant at 1m histogram_sum(single_histogram) {} 5 # histogram_avg calculates the average from sum and count properties. -eval instant at 5m histogram_avg(single_histogram) +eval instant at 1m histogram_avg(single_histogram) {} 1.25 # We expect half of the values to fall in the range 1 < x <= 2. -eval instant at 5m histogram_fraction(1, 2, single_histogram) +eval instant at 1m histogram_fraction(1, 2, single_histogram) {} 0.5 # We expect all values to fall in the range 0 < x <= 8. -eval instant at 5m histogram_fraction(0, 8, single_histogram) +eval instant at 1m histogram_fraction(0, 8, single_histogram) {} 1 -# Median is 1.5 due to linear estimation of the midpoint of the middle bucket, whose values are within range 1 < x <= 2. -eval instant at 5m histogram_quantile(0.5, single_histogram) - {} 1.5 - +# Median is 1.414213562373095 (2**2**-1, or sqrt(2)) due to +# exponential interpolation, i.e. the "midpoint" within range 1 < x <= +# 2 is assumed where the bucket boundary would be if we increased the +# resolution of the histogram by one step. +eval instant at 1m histogram_quantile(0.5, single_histogram) + {} 1.414213562373095 +clear # Repeat the same histogram 10 times. load 5m @@ -68,8 +71,9 @@ eval instant at 5m histogram_avg(multi_histogram) eval instant at 5m histogram_fraction(1, 2, multi_histogram) {} 0.5 +# See explanation for exponential interpolation above. eval instant at 5m histogram_quantile(0.5, multi_histogram) - {} 1.5 + {} 1.414213562373095 # Each entry should look the same as the first. @@ -85,10 +89,11 @@ eval instant at 50m histogram_avg(multi_histogram) eval instant at 50m histogram_fraction(1, 2, multi_histogram) {} 0.5 +# See explanation for exponential interpolation above. 
eval instant at 50m histogram_quantile(0.5, multi_histogram) - {} 1.5 - + {} 1.414213562373095 +clear # Accumulate the histogram addition for 10 iterations, offset is a bucket position where offset:0 is always the bucket # with an upper limit of 1 and offset:1 is the bucket which follows to the right. Negative offsets represent bucket @@ -109,8 +114,9 @@ eval instant at 5m histogram_avg(incr_histogram) eval instant at 5m histogram_fraction(1, 2, incr_histogram) {} 0.6 +# See explanation for exponential interpolation above. eval instant at 5m histogram_quantile(0.5, incr_histogram) - {} 1.5 + {} 1.414213562373095 eval instant at 50m incr_histogram @@ -129,18 +135,20 @@ eval instant at 50m histogram_avg(incr_histogram) eval instant at 50m histogram_fraction(1, 2, incr_histogram) {} 0.8571428571428571 +# See explanation for exponential interpolation above. eval instant at 50m histogram_quantile(0.5, incr_histogram) - {} 1.5 + {} 1.414213562373095 # Per-second average rate of increase should be 1/(5*60) for count and buckets, then 2/(5*60) for sum. -eval instant at 50m rate(incr_histogram[5m]) - {} {{count:0.0033333333333333335 sum:0.006666666666666667 offset:1 buckets:[0.0033333333333333335]}} +eval instant at 50m rate(incr_histogram[10m]) + {} {{count:0.0033333333333333335 sum:0.006666666666666667 offset:1 buckets:[0.0033333333333333335]}} # Calculate the 50th percentile of observations over the last 10m. +# See explanation for exponential interpolation above. eval instant at 50m histogram_quantile(0.5, rate(incr_histogram[10m])) - {} 1.5 - + {} 1.414213562373095 +clear # Schema represents the histogram resolution, different schema have compatible bucket boundaries, e.g.: # 0: 1 2 4 8 16 32 64 (higher resolution) @@ -166,77 +174,79 @@ eval instant at 5m histogram_avg(low_res_histogram) eval instant at 5m histogram_fraction(1, 4, low_res_histogram) {} 1 - +clear # z_bucket:1 means there is one observation in the zero bucket and z_bucket_w:0.5 means the zero bucket has the range # 0 < x <= 0.5. Sum and count are expected to represent all observations in the histogram, including those in the zero bucket. load 5m single_zero_histogram {{schema:0 z_bucket:1 z_bucket_w:0.5 sum:0.25 count:1}} -eval instant at 5m histogram_count(single_zero_histogram) +eval instant at 1m histogram_count(single_zero_histogram) {} 1 -eval instant at 5m histogram_sum(single_zero_histogram) +eval instant at 1m histogram_sum(single_zero_histogram) {} 0.25 -eval instant at 5m histogram_avg(single_zero_histogram) +eval instant at 1m histogram_avg(single_zero_histogram) {} 0.25 # When only the zero bucket is populated, or there are negative buckets, the distribution is assumed to be equally # distributed around zero; i.e. that there are an equal number of positive and negative observations. Therefore the # entire distribution must lie within the full range of the zero bucket, in this case: -0.5 < x <= +0.5. -eval instant at 5m histogram_fraction(-0.5, 0.5, single_zero_histogram) +eval instant at 1m histogram_fraction(-0.5, 0.5, single_zero_histogram) {} 1 # Half of the observations are estimated to be zero, as this is the midpoint between -0.5 and +0.5. -eval instant at 5m histogram_quantile(0.5, single_zero_histogram) +eval instant at 1m histogram_quantile(0.5, single_zero_histogram) {} 0 - +clear # Let's turn single_histogram upside-down. 
load 5m negative_histogram {{schema:0 sum:-5 count:4 n_buckets:[1 2 1]}} -eval instant at 5m histogram_count(negative_histogram) +eval instant at 1m histogram_count(negative_histogram) {} 4 -eval instant at 5m histogram_sum(negative_histogram) +eval instant at 1m histogram_sum(negative_histogram) {} -5 -eval instant at 5m histogram_avg(negative_histogram) +eval instant at 1m histogram_avg(negative_histogram) {} -1.25 # We expect half of the values to fall in the range -2 < x <= -1. -eval instant at 5m histogram_fraction(-2, -1, negative_histogram) +eval instant at 1m histogram_fraction(-2, -1, negative_histogram) {} 0.5 -eval instant at 5m histogram_quantile(0.5, negative_histogram) - {} -1.5 - +# Exponential interpolation works the same as for positive buckets, just mirrored. +eval instant at 1m histogram_quantile(0.5, negative_histogram) + {} -1.414213562373095 +clear # Two histogram samples. load 5m two_samples_histogram {{schema:0 sum:4 count:4 buckets:[1 2 1]}} {{schema:0 sum:-4 count:4 n_buckets:[1 2 1]}} # We expect to see the newest sample. -eval instant at 10m histogram_count(two_samples_histogram) +eval instant at 5m histogram_count(two_samples_histogram) {} 4 -eval instant at 10m histogram_sum(two_samples_histogram) +eval instant at 5m histogram_sum(two_samples_histogram) {} -4 -eval instant at 10m histogram_avg(two_samples_histogram) +eval instant at 5m histogram_avg(two_samples_histogram) {} -1 -eval instant at 10m histogram_fraction(-2, -1, two_samples_histogram) +eval instant at 5m histogram_fraction(-2, -1, two_samples_histogram) {} 0.5 -eval instant at 10m histogram_quantile(0.5, two_samples_histogram) - {} -1.5 - +# See explanation for exponential interpolation above. +eval instant at 5m histogram_quantile(0.5, two_samples_histogram) + {} -1.414213562373095 +clear # Add two histograms with negated data. load 5m @@ -259,6 +269,8 @@ eval instant at 5m histogram_fraction(0, 4, balanced_histogram) eval instant at 5m histogram_quantile(0.5, balanced_histogram) {} 0.5 +clear + # Add histogram to test sum(last_over_time) regression load 5m incr_sum_histogram{number="1"} {{schema:0 sum:0 count:0 buckets:[1]}}+{{schema:0 sum:1 count:1 buckets:[1]}}x10 @@ -270,6 +282,8 @@ eval instant at 50m histogram_sum(sum(incr_sum_histogram)) eval instant at 50m histogram_sum(sum(last_over_time(incr_sum_histogram[5m]))) {} 30 +clear + # Apply rate function to histogram. load 15s histogram_rate {{schema:1 count:12 sum:18.4 z_bucket:2 z_bucket_w:0.001 buckets:[1 2 0 1 1] n_buckets:[1 2 0 1 1]}}+{{schema:1 count:9 sum:18.4 z_bucket:1 z_bucket_w:0.001 buckets:[1 1 0 1 1] n_buckets:[1 1 0 1 1]}}x100 @@ -280,6 +294,8 @@ eval instant at 5m rate(histogram_rate[45s]) eval range from 5m to 5m30s step 30s rate(histogram_rate[45s]) {} {{schema:1 count:0.6 sum:1.2266666666666652 z_bucket:0.06666666666666667 z_bucket_w:0.001 buckets:[0.06666666666666667 0.06666666666666667 0 0.06666666666666667 0.06666666666666667] n_buckets:[0.06666666666666667 0.06666666666666667 0 0.06666666666666667 0.06666666666666667]}}x1 +clear + # Apply count and sum function to histogram. load 10m histogram_count_sum_2 {{schema:0 count:24 sum:100 z_bucket:4 z_bucket_w:0.001 buckets:[2 3 0 1 4] n_buckets:[2 3 0 1 4]}}x1 @@ -290,6 +306,8 @@ eval instant at 10m histogram_count(histogram_count_sum_2) eval instant at 10m histogram_sum(histogram_count_sum_2) {} 100 +clear + # Apply stddev and stdvar function to histogram with {1, 2, 3, 4} (low res). 
load 10m histogram_stddev_stdvar_1 {{schema:2 count:4 sum:10 buckets:[1 0 0 0 1 0 0 1 1]}}x1 @@ -300,6 +318,8 @@ eval instant at 10m histogram_stddev(histogram_stddev_stdvar_1) eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_1) {} 1.163807968526718 +clear + # Apply stddev and stdvar function to histogram with {1, 1, 1, 1} (high res). load 10m histogram_stddev_stdvar_2 {{schema:8 count:10 sum:10 buckets:[1 2 3 4]}}x1 @@ -310,6 +330,8 @@ eval instant at 10m histogram_stddev(histogram_stddev_stdvar_2) eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_2) {} 2.3971123370139447e-05 +clear + # Apply stddev and stdvar function to histogram with {-50, -8, 0, 3, 8, 9}. load 10m histogram_stddev_stdvar_3 {{schema:3 count:7 sum:62 z_bucket:1 buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ] n_buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ]}}x1 @@ -320,6 +342,8 @@ eval instant at 10m histogram_stddev(histogram_stddev_stdvar_3) eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_3) {} 1844.4651144196398 +clear + # Apply stddev and stdvar function to histogram with {-100000, -10000, -1000, -888, -888, -100, -50, -9, -8, -3}. load 10m histogram_stddev_stdvar_4 {{schema:0 count:10 sum:-112946 z_bucket:0 n_buckets:[0 0 1 1 1 0 1 1 0 0 3 0 0 0 1 0 0 1]}}x1 @@ -330,6 +354,8 @@ eval instant at 10m histogram_stddev(histogram_stddev_stdvar_4) eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_4) {} 759352122.1939945 +clear + # Apply stddev and stdvar function to histogram with {-10x10}. load 10m histogram_stddev_stdvar_5 {{schema:0 count:10 sum:-100 z_bucket:0 n_buckets:[0 0 0 0 10]}}x1 @@ -340,6 +366,8 @@ eval instant at 10m histogram_stddev(histogram_stddev_stdvar_5) eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_5) {} 1.725830020304794 +clear + # Apply stddev and stdvar function to histogram with {-50, -8, 0, 3, 8, 9, NaN}. load 10m histogram_stddev_stdvar_6 {{schema:3 count:7 sum:NaN z_bucket:1 buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ] n_buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ]}}x1 @@ -350,6 +378,8 @@ eval instant at 10m histogram_stddev(histogram_stddev_stdvar_6) eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_6) {} NaN +clear + # Apply stddev and stdvar function to histogram with {-50, -8, 0, 3, 8, 9, Inf}. load 10m histogram_stddev_stdvar_7 {{schema:3 count:7 sum:Inf z_bucket:1 buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ] n_buckets:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 ]}}x1 @@ -360,6 +390,8 @@ eval instant at 10m histogram_stddev(histogram_stddev_stdvar_7) eval instant at 10m histogram_stdvar(histogram_stddev_stdvar_7) {} Inf +clear + # Apply quantile function to histogram with all positive buckets with zero bucket. load 10m histogram_quantile_1 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 buckets:[2 3 0 1 4]}}x1 @@ -370,20 +402,24 @@ eval_warn instant at 10m histogram_quantile(1.001, histogram_quantile_1) eval instant at 10m histogram_quantile(1, histogram_quantile_1) {} 16 +# The following quantiles are within a bucket. 
Exponential +# interpolation is applied (rather than linear, as it is done for +# classic histograms), leading to slightly different quantile values. eval instant at 10m histogram_quantile(0.99, histogram_quantile_1) - {} 15.759999999999998 + {} 15.67072476139083 eval instant at 10m histogram_quantile(0.9, histogram_quantile_1) - {} 13.600000000000001 + {} 12.99603834169977 eval instant at 10m histogram_quantile(0.6, histogram_quantile_1) - {} 4.799999999999997 + {} 4.594793419988138 eval instant at 10m histogram_quantile(0.5, histogram_quantile_1) - {} 1.6666666666666665 + {} 1.5874010519681994 +# Linear interpolation within the zero bucket after all. eval instant at 10m histogram_quantile(0.1, histogram_quantile_1) - {} 0.0006000000000000001 + {} 0.0006 eval instant at 10m histogram_quantile(0, histogram_quantile_1) {} 0 @@ -391,6 +427,8 @@ eval instant at 10m histogram_quantile(0, histogram_quantile_1) eval_warn instant at 10m histogram_quantile(-1, histogram_quantile_1) {} -Inf +clear + # Apply quantile function to histogram with all negative buckets with zero bucket. load 10m histogram_quantile_2 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 n_buckets:[2 3 0 1 4]}}x1 @@ -401,17 +439,20 @@ eval_warn instant at 10m histogram_quantile(1.001, histogram_quantile_2) eval instant at 10m histogram_quantile(1, histogram_quantile_2) {} 0 +# Again, the quantile values here are slightly different from what +# they would be with linear interpolation. Note that quantiles +# ending up in the zero bucket are linearly interpolated after all. eval instant at 10m histogram_quantile(0.99, histogram_quantile_2) - {} -6.000000000000048e-05 + {} -0.00006 eval instant at 10m histogram_quantile(0.9, histogram_quantile_2) - {} -0.0005999999999999996 + {} -0.0006 eval instant at 10m histogram_quantile(0.5, histogram_quantile_2) - {} -1.6666666666666667 + {} -1.5874010519681996 eval instant at 10m histogram_quantile(0.1, histogram_quantile_2) - {} -13.6 + {} -12.996038341699768 eval instant at 10m histogram_quantile(0, histogram_quantile_2) {} -16 @@ -419,7 +460,11 @@ eval instant at 10m histogram_quantile(0, histogram_quantile_2) eval_warn instant at 10m histogram_quantile(-1, histogram_quantile_2) {} -Inf -# Apply quantile function to histogram with both positive and negative buckets with zero bucket. +clear + +# Apply quantile function to histogram with both positive and negative +# buckets with zero bucket. +# First positive buckets with exponential interpolation. load 10m histogram_quantile_3 {{schema:0 count:24 sum:100 z_bucket:4 z_bucket_w:0.001 buckets:[2 3 0 1 4] n_buckets:[2 3 0 1 4]}}x1 @@ -430,31 +475,34 @@ eval instant at 10m histogram_quantile(1, histogram_quantile_3) {} 16 eval instant at 10m histogram_quantile(0.99, histogram_quantile_3) - {} 15.519999999999996 + {} 15.34822590920423 eval instant at 10m histogram_quantile(0.9, histogram_quantile_3) - {} 11.200000000000003 + {} 10.556063286183155 eval instant at 10m histogram_quantile(0.7, histogram_quantile_3) - {} 1.2666666666666657 + {} 1.2030250360821164 +# Linear interpolation in the zero bucket, symmetrically centered around +# the zero point. eval instant at 10m histogram_quantile(0.55, histogram_quantile_3) - {} 0.0006000000000000005 + {} 0.0006 eval instant at 10m histogram_quantile(0.5, histogram_quantile_3) {} 0 eval instant at 10m histogram_quantile(0.45, histogram_quantile_3) - {} -0.0005999999999999996 + {} -0.0006 +# Finally negative buckets with mirrored exponential interpolation. 
eval instant at 10m histogram_quantile(0.3, histogram_quantile_3) - {} -1.266666666666667 + {} -1.2030250360821169 eval instant at 10m histogram_quantile(0.1, histogram_quantile_3) - {} -11.2 + {} -10.556063286183155 eval instant at 10m histogram_quantile(0.01, histogram_quantile_3) - {} -15.52 + {} -15.34822590920423 eval instant at 10m histogram_quantile(0, histogram_quantile_3) {} -16 @@ -462,6 +510,92 @@ eval instant at 10m histogram_quantile(0, histogram_quantile_3) eval_warn instant at 10m histogram_quantile(-1, histogram_quantile_3) {} -Inf +clear + +# Try different schemas. (The interpolation logic must not depend on the schema.) +clear +load 1m + var_res_histogram{schema="-1"} {{schema:-1 sum:6 count:5 buckets:[0 5]}} + var_res_histogram{schema="0"} {{schema:0 sum:4 count:5 buckets:[0 5]}} + var_res_histogram{schema="+1"} {{schema:1 sum:4 count:5 buckets:[0 5]}} + +eval instant at 1m histogram_quantile(0.5, var_res_histogram) + {schema="-1"} 2.0 + {schema="0"} 1.4142135623730951 + {schema="+1"} 1.189207 + +eval instant at 1m histogram_fraction(0, 2, var_res_histogram{schema="-1"}) + {schema="-1"} 0.5 + +eval instant at 1m histogram_fraction(0, 1.4142135623730951, var_res_histogram{schema="0"}) + {schema="0"} 0.5 + +eval instant at 1m histogram_fraction(0, 1.189207, var_res_histogram{schema="+1"}) + {schema="+1"} 0.5 + +# The same as above, but one bucket "further to the right". +clear +load 1m + var_res_histogram{schema="-1"} {{schema:-1 sum:6 count:5 buckets:[0 0 5]}} + var_res_histogram{schema="0"} {{schema:0 sum:4 count:5 buckets:[0 0 5]}} + var_res_histogram{schema="+1"} {{schema:1 sum:4 count:5 buckets:[0 0 5]}} + +eval instant at 1m histogram_quantile(0.5, var_res_histogram) + {schema="-1"} 8.0 + {schema="0"} 2.82842712474619 + {schema="+1"} 1.6817928305074292 + +eval instant at 1m histogram_fraction(0, 8, var_res_histogram{schema="-1"}) + {schema="-1"} 0.5 + +eval instant at 1m histogram_fraction(0, 2.82842712474619, var_res_histogram{schema="0"}) + {schema="0"} 0.5 + +eval instant at 1m histogram_fraction(0, 1.6817928305074292, var_res_histogram{schema="+1"}) + {schema="+1"} 0.5 + +# And everything again but for negative buckets. 
+clear +load 1m + var_res_histogram{schema="-1"} {{schema:-1 sum:6 count:5 n_buckets:[0 5]}} + var_res_histogram{schema="0"} {{schema:0 sum:4 count:5 n_buckets:[0 5]}} + var_res_histogram{schema="+1"} {{schema:1 sum:4 count:5 n_buckets:[0 5]}} + +eval instant at 1m histogram_quantile(0.5, var_res_histogram) + {schema="-1"} -2.0 + {schema="0"} -1.4142135623730951 + {schema="+1"} -1.189207 + +eval instant at 1m histogram_fraction(-2, 0, var_res_histogram{schema="-1"}) + {schema="-1"} 0.5 + +eval instant at 1m histogram_fraction(-1.4142135623730951, 0, var_res_histogram{schema="0"}) + {schema="0"} 0.5 + +eval instant at 1m histogram_fraction(-1.189207, 0, var_res_histogram{schema="+1"}) + {schema="+1"} 0.5 + +clear +load 1m + var_res_histogram{schema="-1"} {{schema:-1 sum:6 count:5 n_buckets:[0 0 5]}} + var_res_histogram{schema="0"} {{schema:0 sum:4 count:5 n_buckets:[0 0 5]}} + var_res_histogram{schema="+1"} {{schema:1 sum:4 count:5 n_buckets:[0 0 5]}} + +eval instant at 1m histogram_quantile(0.5, var_res_histogram) + {schema="-1"} -8.0 + {schema="0"} -2.82842712474619 + {schema="+1"} -1.6817928305074292 + +eval instant at 1m histogram_fraction(-8, 0, var_res_histogram{schema="-1"}) + {schema="-1"} 0.5 + +eval instant at 1m histogram_fraction(-2.82842712474619, 0, var_res_histogram{schema="0"}) + {schema="0"} 0.5 + +eval instant at 1m histogram_fraction(-1.6817928305074292, 0, var_res_histogram{schema="+1"}) + {schema="+1"} 0.5 + + # Apply fraction function to empty histogram. load 10m histogram_fraction_1 {{}}x1 @@ -469,6 +603,8 @@ load 10m eval instant at 10m histogram_fraction(3.1415, 42, histogram_fraction_1) {} NaN +clear + # Apply fraction function to histogram with positive and zero buckets. load 10m histogram_fraction_2 {{schema:0 count:12 sum:100 z_bucket:2 z_bucket_w:0.001 buckets:[2 3 0 1 4]}}x1 @@ -485,11 +621,18 @@ eval instant at 10m histogram_fraction(-0.001, 0, histogram_fraction_2) eval instant at 10m histogram_fraction(0, 0.001, histogram_fraction_2) {} 0.16666666666666666 +# Note that this result and the one above add up to 1. +eval instant at 10m histogram_fraction(0.001, inf, histogram_fraction_2) + {} 0.8333333333333334 + +# We are in the zero bucket, resulting in linear interpolation eval instant at 10m histogram_fraction(0, 0.0005, histogram_fraction_2) {} 0.08333333333333333 -eval instant at 10m histogram_fraction(0.001, inf, histogram_fraction_2) - {} 0.8333333333333334 +# Demonstrate that the inverse operation with histogram_quantile yields +# the original value with the non-trivial result above. +eval instant at 10m histogram_quantile(0.08333333333333333, histogram_fraction_2) + {} 0.0005 eval instant at 10m histogram_fraction(-inf, -0.001, histogram_fraction_2) {} 0 @@ -497,17 +640,30 @@ eval instant at 10m histogram_fraction(-inf, -0.001, histogram_fraction_2) eval instant at 10m histogram_fraction(1, 2, histogram_fraction_2) {} 0.25 +# More non-trivial results with interpolation involved below, including +# some round-trips via histogram_quantile to prove that the inverse +# operation leads to the same results. 
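The expected values above and in the evals that follow all come from the new exponential interpolation: within an exponential bucket (lower, upper], the quantile is interpolated on a log2 scale, so the midpoint of the (1,2] bucket is 2^0.5 rather than the linear 1.5. A minimal standalone Go sketch of that calculation (the helper name and the main function are illustrative only, not part of the vendored code):

package main

import (
	"fmt"
	"math"
)

// exponentialInterpolate returns the point that lies `fraction` of the way
// through the exponential bucket (lower, upper] when distances are measured
// on a log2 scale, which is the assumption the vendored quantile.go now makes
// for native histogram buckets (linear interpolation is kept for custom
// buckets and for the zero bucket).
func exponentialInterpolate(lower, upper, fraction float64) float64 {
	logLower := math.Log2(lower)
	logUpper := math.Log2(upper)
	return math.Exp2(logLower + (logUpper-logLower)*fraction)
}

func main() {
	fmt.Println(exponentialInterpolate(1, 2, 0.5))          // 1.4142135623730951, the new median of the (1,2] bucket
	fmt.Println(exponentialInterpolate(1, 4, 0.5))          // 2, same idea for a schema:-1 bucket
	fmt.Println(exponentialInterpolate(1, math.Sqrt2, 0.5)) // ≈1.189207, same idea for a schema:1 bucket
}

These three outputs line up with the var_res_histogram expectations earlier in this test file, and the round-trip evals below (for example histogram_fraction(0, 6, histogram_fraction_2) yielding 0.6320802083934297, and histogram_quantile of that value yielding 6) show histogram_fraction applying the same interpolation in the inverse direction.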
+ +eval instant at 10m histogram_fraction(0, 1.5, histogram_fraction_2) + {} 0.4795739585136224 + eval instant at 10m histogram_fraction(1.5, 2, histogram_fraction_2) - {} 0.125 + {} 0.10375937481971091 eval instant at 10m histogram_fraction(1, 8, histogram_fraction_2) {} 0.3333333333333333 +eval instant at 10m histogram_fraction(0, 6, histogram_fraction_2) + {} 0.6320802083934297 + +eval instant at 10m histogram_quantile(0.6320802083934297, histogram_fraction_2) + {} 6 + eval instant at 10m histogram_fraction(1, 6, histogram_fraction_2) - {} 0.2916666666666667 + {} 0.29874687506009634 eval instant at 10m histogram_fraction(1.5, 6, histogram_fraction_2) - {} 0.16666666666666666 + {} 0.15250624987980724 eval instant at 10m histogram_fraction(-2, -1, histogram_fraction_2) {} 0 @@ -570,6 +726,12 @@ eval instant at 10m histogram_fraction(0, 0.001, histogram_fraction_3) eval instant at 10m histogram_fraction(-0.0005, 0, histogram_fraction_3) {} 0.08333333333333333 +eval instant at 10m histogram_fraction(-inf, -0.0005, histogram_fraction_3) + {} 0.9166666666666666 + +eval instant at 10m histogram_quantile(0.9166666666666666, histogram_fraction_3) + {} -0.0005 + eval instant at 10m histogram_fraction(0.001, inf, histogram_fraction_3) {} 0 @@ -595,16 +757,22 @@ eval instant at 10m histogram_fraction(-2, -1, histogram_fraction_3) {} 0.25 eval instant at 10m histogram_fraction(-2, -1.5, histogram_fraction_3) - {} 0.125 + {} 0.10375937481971091 eval instant at 10m histogram_fraction(-8, -1, histogram_fraction_3) {} 0.3333333333333333 +eval instant at 10m histogram_fraction(-inf, -6, histogram_fraction_3) + {} 0.36791979160657035 + +eval instant at 10m histogram_quantile(0.36791979160657035, histogram_fraction_3) + {} -6 + eval instant at 10m histogram_fraction(-6, -1, histogram_fraction_3) - {} 0.2916666666666667 + {} 0.29874687506009634 eval instant at 10m histogram_fraction(-6, -1.5, histogram_fraction_3) - {} 0.16666666666666666 + {} 0.15250624987980724 eval instant at 10m histogram_fraction(42, 3.1415, histogram_fraction_3) {} 0 @@ -633,6 +801,8 @@ eval instant at 10m histogram_fraction(NaN, NaN, histogram_fraction_3) eval instant at 10m histogram_fraction(-Inf, +Inf, histogram_fraction_3) {} 1 +clear + # Apply fraction function to histogram with both positive, negative and zero buckets. 
load 10m histogram_fraction_4 {{schema:0 count:24 sum:100 z_bucket:4 z_bucket_w:0.001 buckets:[2 3 0 1 4] n_buckets:[2 3 0 1 4]}}x1 @@ -652,6 +822,18 @@ eval instant at 10m histogram_fraction(0, 0.001, histogram_fraction_4) eval instant at 10m histogram_fraction(-0.0005, 0.0005, histogram_fraction_4) {} 0.08333333333333333 +eval instant at 10m histogram_fraction(-inf, 0.0005, histogram_fraction_4) + {} 0.5416666666666666 + +eval instant at 10m histogram_quantile(0.5416666666666666, histogram_fraction_4) + {} 0.0005 + +eval instant at 10m histogram_fraction(-inf, -0.0005, histogram_fraction_4) + {} 0.4583333333333333 + +eval instant at 10m histogram_quantile(0.4583333333333333, histogram_fraction_4) + {} -0.0005 + eval instant at 10m histogram_fraction(0.001, inf, histogram_fraction_4) {} 0.4166666666666667 @@ -662,31 +844,31 @@ eval instant at 10m histogram_fraction(1, 2, histogram_fraction_4) {} 0.125 eval instant at 10m histogram_fraction(1.5, 2, histogram_fraction_4) - {} 0.0625 + {} 0.051879687409855414 eval instant at 10m histogram_fraction(1, 8, histogram_fraction_4) {} 0.16666666666666666 eval instant at 10m histogram_fraction(1, 6, histogram_fraction_4) - {} 0.14583333333333334 + {} 0.14937343753004825 eval instant at 10m histogram_fraction(1.5, 6, histogram_fraction_4) - {} 0.08333333333333333 + {} 0.07625312493990366 eval instant at 10m histogram_fraction(-2, -1, histogram_fraction_4) {} 0.125 eval instant at 10m histogram_fraction(-2, -1.5, histogram_fraction_4) - {} 0.0625 + {} 0.051879687409855456 eval instant at 10m histogram_fraction(-8, -1, histogram_fraction_4) {} 0.16666666666666666 eval instant at 10m histogram_fraction(-6, -1, histogram_fraction_4) - {} 0.14583333333333334 + {} 0.14937343753004817 eval instant at 10m histogram_fraction(-6, -1.5, histogram_fraction_4) - {} 0.08333333333333333 + {} 0.07625312493990362 eval instant at 10m histogram_fraction(42, 3.1415, histogram_fraction_4) {} 0 @@ -766,18 +948,40 @@ eval instant at 10m histogram_mul_div*float_series_0 eval instant at 10m float_series_0*histogram_mul_div {} {{schema:0 count:0 sum:0 z_bucket:0 z_bucket_w:0.001 buckets:[0 0 0] n_buckets:[0 0 0]}} -# TODO: (NeerajGartia21) remove all the histogram buckets in case of division with zero. See: https://github.com/prometheus/prometheus/issues/13934 eval instant at 10m histogram_mul_div/0 - {} {{schema:0 count:Inf sum:Inf z_bucket:Inf z_bucket_w:0.001 buckets:[Inf Inf Inf] n_buckets:[Inf Inf Inf]}} + {} {{schema:0 count:Inf sum:Inf z_bucket_w:0.001 z_bucket:Inf}} eval instant at 10m histogram_mul_div/float_series_0 - {} {{schema:0 count:Inf sum:Inf z_bucket:Inf z_bucket_w:0.001 buckets:[Inf Inf Inf] n_buckets:[Inf Inf Inf]}} + {} {{schema:0 count:Inf sum:Inf z_bucket_w:0.001 z_bucket:Inf}} eval instant at 10m histogram_mul_div*0/0 - {} {{schema:0 count:NaN sum:NaN z_bucket:NaN z_bucket_w:0.001 buckets:[NaN NaN NaN] n_buckets:[NaN NaN NaN]}} + {} {{schema:0 count:NaN sum:NaN z_bucket_w:0.001 z_bucket:NaN}} + +eval_info instant at 10m histogram_mul_div*histogram_mul_div + +eval_info instant at 10m histogram_mul_div/histogram_mul_div + +eval_info instant at 10m float_series_3/histogram_mul_div + +eval_info instant at 10m 0/histogram_mul_div clear +# Apply binary operators to mixed histogram and float samples. +# TODO:(NeerajGartia21) move these tests to their respective locations when tests from engine_test.go are be moved here. 
+ +load 10m + histogram_sample {{schema:0 count:24 sum:100 z_bucket:4 z_bucket_w:0.001 buckets:[2 3 0 1 4] n_buckets:[2 3 0 1 4]}}x1 + float_sample 0x1 + +eval_info instant at 10m float_sample+histogram_sample + +eval_info instant at 10m histogram_sample+float_sample + +eval_info instant at 10m float_sample-histogram_sample + +eval_info instant at 10m histogram_sample-float_sample + # Counter reset only noticeable in a single bucket. load 5m reset_in_bucket {{schema:0 count:4 sum:5 buckets:[1 2 1]}} {{schema:0 count:5 sum:6 buckets:[1 1 3]}} {{schema:0 count:6 sum:7 buckets:[1 2 3]}} @@ -814,7 +1018,7 @@ load 30s some_metric {{schema:0 sum:1 count:1 buckets:[1] counter_reset_hint:gauge}} {{schema:0 sum:2 count:2 buckets:[2] counter_reset_hint:gauge}} {{schema:0 sum:3 count:3 buckets:[3] counter_reset_hint:gauge}} # Test the case where we only have two points for rate -eval_warn instant at 30s rate(some_metric[30s]) +eval_warn instant at 30s rate(some_metric[1m]) {} {{count:0.03333333333333333 sum:0.03333333333333333 buckets:[0.03333333333333333]}} # Test the case where we have more than two points for rate @@ -836,11 +1040,11 @@ eval_warn instant at 1m30s rate(some_metric[1m]) # Should produce no results. # Start with custom, end with exponential. -eval_warn instant at 1m rate(some_metric[30s]) +eval_warn instant at 1m rate(some_metric[1m]) # Should produce no results. # Start with exponential, end with custom. -eval_warn instant at 30s rate(some_metric[30s]) +eval_warn instant at 30s rate(some_metric[1m]) # Should produce no results. clear @@ -975,8 +1179,8 @@ clear load 1m histogram_sum_over_time {{schema:0 count:25 sum:1234.5 z_bucket:4 z_bucket_w:0.001 buckets:[1 2 0 1 1] n_buckets:[2 4 0 0 1 9]}} {{schema:0 count:41 sum:2345.6 z_bucket:5 z_bucket_w:0.001 buckets:[1 3 1 2 1 1 1] n_buckets:[0 1 4 2 7 0 0 0 0 5 5 2]}} {{schema:0 count:41 sum:1111.1 z_bucket:5 z_bucket_w:0.001 buckets:[1 3 1 2 1 1 1] n_buckets:[0 1 4 2 7 0 0 0 0 5 5 2]}} {{schema:1 count:0}} -eval instant at 3m sum_over_time(histogram_sum_over_time[3m:1m]) +eval instant at 3m sum_over_time(histogram_sum_over_time[4m:1m]) {} {{schema:0 count:107 sum:4691.2 z_bucket:14 z_bucket_w:0.001 buckets:[3 8 2 5 3 2 2] n_buckets:[2 6 8 4 15 9 0 0 0 10 10 4]}} -eval instant at 3m avg_over_time(histogram_sum_over_time[3m:1m]) +eval instant at 3m avg_over_time(histogram_sum_over_time[4m:1m]) {} {{schema:0 count:26.75 sum:1172.8 z_bucket:3.5 z_bucket_w:0.001 buckets:[0.75 2 0.5 1.25 0.75 0.5 0.5] n_buckets:[0.5 1.5 2 1 3.75 2.25 0 0 0 2.5 2.5 1]}} diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/operators.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/operators.test index df2311b9bae..645cca88b85 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/operators.test +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/operators.test @@ -113,7 +113,7 @@ eval instant at 50m http_requests{job="api-server", group="canary"} http_requests{group="canary", instance="0", job="api-server"} 300 http_requests{group="canary", instance="1", job="api-server"} 400 -eval instant at 50m http_requests{job="api-server", group="canary"} + rate(http_requests{job="api-server"}[5m]) * 5 * 60 +eval instant at 50m http_requests{job="api-server", group="canary"} + rate(http_requests{job="api-server"}[10m]) * 5 * 60 {group="canary", instance="0", job="api-server"} 330 {group="canary", instance="1", job="api-server"} 440 @@ -308,65 +308,65 @@ load 5m 
threshold{instance="abc",job="node",target="a@b.com"} 0 # Copy machine role to node variable. -eval instant at 5m node_role * on (instance) group_right (role) node_var +eval instant at 1m node_role * on (instance) group_right (role) node_var {instance="abc",job="node",role="prometheus"} 2 -eval instant at 5m node_var * on (instance) group_left (role) node_role +eval instant at 1m node_var * on (instance) group_left (role) node_role {instance="abc",job="node",role="prometheus"} 2 -eval instant at 5m node_var * ignoring (role) group_left (role) node_role +eval instant at 1m node_var * ignoring (role) group_left (role) node_role {instance="abc",job="node",role="prometheus"} 2 -eval instant at 5m node_role * ignoring (role) group_right (role) node_var +eval instant at 1m node_role * ignoring (role) group_right (role) node_var {instance="abc",job="node",role="prometheus"} 2 # Copy machine role to node variable with instrumentation labels. -eval instant at 5m node_cpu * ignoring (role, mode) group_left (role) node_role +eval instant at 1m node_cpu * ignoring (role, mode) group_left (role) node_role {instance="abc",job="node",mode="idle",role="prometheus"} 3 {instance="abc",job="node",mode="user",role="prometheus"} 1 -eval instant at 5m node_cpu * on (instance) group_left (role) node_role +eval instant at 1m node_cpu * on (instance) group_left (role) node_role {instance="abc",job="node",mode="idle",role="prometheus"} 3 {instance="abc",job="node",mode="user",role="prometheus"} 1 # Ratio of total. -eval instant at 5m node_cpu / on (instance) group_left sum by (instance,job)(node_cpu) +eval instant at 1m node_cpu / on (instance) group_left sum by (instance,job)(node_cpu) {instance="abc",job="node",mode="idle"} .75 {instance="abc",job="node",mode="user"} .25 {instance="def",job="node",mode="idle"} .80 {instance="def",job="node",mode="user"} .20 -eval instant at 5m sum by (mode, job)(node_cpu) / on (job) group_left sum by (job)(node_cpu) +eval instant at 1m sum by (mode, job)(node_cpu) / on (job) group_left sum by (job)(node_cpu) {job="node",mode="idle"} 0.7857142857142857 {job="node",mode="user"} 0.21428571428571427 -eval instant at 5m sum(sum by (mode, job)(node_cpu) / on (job) group_left sum by (job)(node_cpu)) +eval instant at 1m sum(sum by (mode, job)(node_cpu) / on (job) group_left sum by (job)(node_cpu)) {} 1.0 -eval instant at 5m node_cpu / ignoring (mode) group_left sum without (mode)(node_cpu) +eval instant at 1m node_cpu / ignoring (mode) group_left sum without (mode)(node_cpu) {instance="abc",job="node",mode="idle"} .75 {instance="abc",job="node",mode="user"} .25 {instance="def",job="node",mode="idle"} .80 {instance="def",job="node",mode="user"} .20 -eval instant at 5m node_cpu / ignoring (mode) group_left(dummy) sum without (mode)(node_cpu) +eval instant at 1m node_cpu / ignoring (mode) group_left(dummy) sum without (mode)(node_cpu) {instance="abc",job="node",mode="idle"} .75 {instance="abc",job="node",mode="user"} .25 {instance="def",job="node",mode="idle"} .80 {instance="def",job="node",mode="user"} .20 -eval instant at 5m sum without (instance)(node_cpu) / ignoring (mode) group_left sum without (instance, mode)(node_cpu) +eval instant at 1m sum without (instance)(node_cpu) / ignoring (mode) group_left sum without (instance, mode)(node_cpu) {job="node",mode="idle"} 0.7857142857142857 {job="node",mode="user"} 0.21428571428571427 -eval instant at 5m sum(sum without (instance)(node_cpu) / ignoring (mode) group_left sum without (instance, mode)(node_cpu)) +eval instant at 1m sum(sum without 
(instance)(node_cpu) / ignoring (mode) group_left sum without (instance, mode)(node_cpu)) {} 1.0 # Copy over label from metric with no matching labels, without having to list cross-job target labels ('job' here). -eval instant at 5m node_cpu + on(dummy) group_left(foo) random*0 +eval instant at 1m node_cpu + on(dummy) group_left(foo) random*0 {instance="abc",job="node",mode="idle",foo="bar"} 3 {instance="abc",job="node",mode="user",foo="bar"} 1 {instance="def",job="node",mode="idle",foo="bar"} 8 @@ -374,12 +374,12 @@ eval instant at 5m node_cpu + on(dummy) group_left(foo) random*0 # Use threshold from metric, and copy over target. -eval instant at 5m node_cpu > on(job, instance) group_left(target) threshold +eval instant at 1m node_cpu > on(job, instance) group_left(target) threshold node_cpu{instance="abc",job="node",mode="idle",target="a@b.com"} 3 node_cpu{instance="abc",job="node",mode="user",target="a@b.com"} 1 # Use threshold from metric, and a default (1) if it's not present. -eval instant at 5m node_cpu > on(job, instance) group_left(target) (threshold or on (job, instance) (sum by (job, instance)(node_cpu) * 0 + 1)) +eval instant at 1m node_cpu > on(job, instance) group_left(target) (threshold or on (job, instance) (sum by (job, instance)(node_cpu) * 0 + 1)) node_cpu{instance="abc",job="node",mode="idle",target="a@b.com"} 3 node_cpu{instance="abc",job="node",mode="user",target="a@b.com"} 1 node_cpu{instance="def",job="node",mode="idle"} 8 @@ -387,37 +387,37 @@ eval instant at 5m node_cpu > on(job, instance) group_left(target) (threshold or # Check that binops drop the metric name. -eval instant at 5m node_cpu + 2 +eval instant at 1m node_cpu + 2 {instance="abc",job="node",mode="idle"} 5 {instance="abc",job="node",mode="user"} 3 {instance="def",job="node",mode="idle"} 10 {instance="def",job="node",mode="user"} 4 -eval instant at 5m node_cpu - 2 +eval instant at 1m node_cpu - 2 {instance="abc",job="node",mode="idle"} 1 {instance="abc",job="node",mode="user"} -1 {instance="def",job="node",mode="idle"} 6 {instance="def",job="node",mode="user"} 0 -eval instant at 5m node_cpu / 2 +eval instant at 1m node_cpu / 2 {instance="abc",job="node",mode="idle"} 1.5 {instance="abc",job="node",mode="user"} 0.5 {instance="def",job="node",mode="idle"} 4 {instance="def",job="node",mode="user"} 1 -eval instant at 5m node_cpu * 2 +eval instant at 1m node_cpu * 2 {instance="abc",job="node",mode="idle"} 6 {instance="abc",job="node",mode="user"} 2 {instance="def",job="node",mode="idle"} 16 {instance="def",job="node",mode="user"} 4 -eval instant at 5m node_cpu ^ 2 +eval instant at 1m node_cpu ^ 2 {instance="abc",job="node",mode="idle"} 9 {instance="abc",job="node",mode="user"} 1 {instance="def",job="node",mode="idle"} 64 {instance="def",job="node",mode="user"} 4 -eval instant at 5m node_cpu % 2 +eval instant at 1m node_cpu % 2 {instance="abc",job="node",mode="idle"} 1 {instance="abc",job="node",mode="user"} 1 {instance="def",job="node",mode="idle"} 0 @@ -432,14 +432,14 @@ load 5m metricB{baz="meh"} 4 # On with no labels, for metrics with no common labels. -eval instant at 5m random + on() metricA +eval instant at 1m random + on() metricA {} 5 # Ignoring with no labels is the same as no ignoring. 
-eval instant at 5m metricA + ignoring() metricB +eval instant at 1m metricA + ignoring() metricB {baz="meh"} 7 -eval instant at 5m metricA + metricB +eval instant at 1m metricA + metricB {baz="meh"} 7 clear @@ -457,16 +457,16 @@ load 5m test_total{instance="localhost"} 50 test_smaller{instance="localhost"} 10 -eval instant at 5m test_total > bool test_smaller +eval instant at 1m test_total > bool test_smaller {instance="localhost"} 1 -eval instant at 5m test_total > test_smaller +eval instant at 1m test_total > test_smaller test_total{instance="localhost"} 50 -eval instant at 5m test_total < bool test_smaller +eval instant at 1m test_total < bool test_smaller {instance="localhost"} 0 -eval instant at 5m test_total < test_smaller +eval instant at 1m test_total < test_smaller clear @@ -476,14 +476,14 @@ load 5m trigx{} 20 trigNaN{} NaN -eval instant at 5m trigy atan2 trigx +eval instant at 1m trigy atan2 trigx {} 0.4636476090008061 -eval instant at 5m trigy atan2 trigNaN +eval instant at 1m trigy atan2 trigNaN {} NaN -eval instant at 5m 10 atan2 20 +eval instant at 1m 10 atan2 20 0.4636476090008061 -eval instant at 5m 10 atan2 NaN +eval instant at 1m 10 atan2 NaN NaN diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/range_queries.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/range_queries.test index e6951096026..3bfe2ce4cb3 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/range_queries.test +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/range_queries.test @@ -1,18 +1,18 @@ # sum_over_time with all values -load 30s +load 15s bar 0 1 10 100 1000 -eval range from 0 to 2m step 1m sum_over_time(bar[30s]) +eval range from 0 to 1m step 30s sum_over_time(bar[30s]) {} 0 11 1100 clear # sum_over_time with trailing values -load 30s +load 15s bar 0 1 10 100 1000 0 0 0 0 eval range from 0 to 2m step 1m sum_over_time(bar[30s]) - {} 0 11 1100 + {} 0 1100 0 clear @@ -21,15 +21,15 @@ load 30s bar 0 1 10 100 1000 10000 100000 1000000 10000000 eval range from 0 to 4m step 1m sum_over_time(bar[30s]) - {} 0 11 1100 110000 11000000 + {} 0 10 1000 100000 10000000 clear # sum_over_time with all values random -load 30s +load 15s bar 5 17 42 2 7 905 51 -eval range from 0 to 3m step 1m sum_over_time(bar[30s]) +eval range from 0 to 90s step 30s sum_over_time(bar[30s]) {} 5 59 9 956 clear diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/staleness.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/staleness.test index 4fdbc997b7f..a48473d4398 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/staleness.test +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/staleness.test @@ -14,10 +14,10 @@ eval instant at 40s metric {__name__="metric"} 2 # It goes stale 5 minutes after the last sample. -eval instant at 330s metric +eval instant at 329s metric {__name__="metric"} 2 -eval instant at 331s metric +eval instant at 330s metric # Range vector ignores stale sample. 
@@ -30,9 +30,13 @@ eval instant at 10s count_over_time(metric[1s]) eval instant at 20s count_over_time(metric[1s]) eval instant at 20s count_over_time(metric[10s]) + +eval instant at 20s count_over_time(metric[20s]) {} 1 eval instant at 20s count_over_time(metric[10]) + +eval instant at 20s count_over_time(metric[20]) {} 1 @@ -48,7 +52,7 @@ eval instant at 0s metric eval instant at 150s metric {__name__="metric"} 0 -eval instant at 300s metric +eval instant at 299s metric {__name__="metric"} 0 -eval instant at 301s metric +eval instant at 300s metric diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/subquery.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/subquery.test index 1d338d97642..3ac547a2b57 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/subquery.test +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/subquery.test @@ -10,18 +10,18 @@ eval instant at 10s sum_over_time(metric[50s:5s]) # Every evaluation yields the last value, i.e. 2 eval instant at 5m sum_over_time(metric[50s:10s]) - {} 12 + {} 10 # Series becomes stale at 5m10s (5m after last sample) -# Hence subquery gets a single sample at 6m-50s=5m10s. -eval instant at 6m sum_over_time(metric[50s:10s]) +# Hence subquery gets a single sample at 5m10s. +eval instant at 5m59s sum_over_time(metric[60s:10s]) {} 2 eval instant at 10s rate(metric[20s:10s]) {} 0.1 eval instant at 20s rate(metric[20s:5s]) - {} 0.05 + {} 0.06666666666666667 clear @@ -49,16 +49,16 @@ load 10s metric3 0+3x1000 eval instant at 1000s sum_over_time(metric1[30s:10s]) - {} 394 + {} 297 -# This is (394*2 - 100), because other than the last 100 at 1000s, +# This is (97 + 98*2 + 99*2 + 100), because other than 97@975s and 100@1000s, # everything else is repeated with the 5s step. eval instant at 1000s sum_over_time(metric1[30s:5s]) - {} 688 + {} 591 -# Offset is aligned with the step. +# Offset is aligned with the step, so this is from [98@980s, 99@990s, 100@1000s]. eval instant at 1010s sum_over_time(metric1[30s:10s] offset 10s) - {} 394 + {} 297 # Same result for different offsets due to step alignment. 
eval instant at 1010s sum_over_time(metric1[30s:10s] offset 9s) @@ -93,16 +93,16 @@ eval instant at 1010s sum_over_time((metric1)[30:10] offset 3) # Nested subqueries eval instant at 1000s rate(sum_over_time(metric1[30s:10s])[50s:10s]) - {} 0.4 + {} 0.30000000000000004 eval instant at 1000s rate(sum_over_time(metric2[30s:10s])[50s:10s]) - {} 0.8 + {} 0.6000000000000001 eval instant at 1000s rate(sum_over_time(metric3[30s:10s])[50s:10s]) - {} 1.2 + {} 0.9 eval instant at 1000s rate(sum_over_time((metric1+metric2+metric3)[30s:10s])[30s:10s]) - {} 2.4 + {} 1.8 clear @@ -115,16 +115,20 @@ load 7s eval instant at 80s rate(metric[1m]) {} 2.517857143 -# No extrapolation, [2@20, 144@80]: (144 - 2) / 60 -eval instant at 80s rate(metric[1m:10s]) - {} 2.366666667 +# Extrapolated to range start for counter, [2@20, 144@80]: (144 - 2) / (80 - 20) +eval instant at 80s rate(metric[1m500ms:10s]) + {} 2.3666666666666667 + +# Extrapolated to zero value for counter, [2@20, 144@80]: (144 - 0) / 61 +eval instant at 80s rate(metric[1m1s:10s]) + {} 2.360655737704918 # Only one value between 10s and 20s, 2@14 eval instant at 20s min_over_time(metric[10s]) {} 2 -# min(1@10, 2@20) -eval instant at 20s min_over_time(metric[10s:10s]) +# min(2@20) +eval instant at 20s min_over_time(metric[15s:10s]) {} 1 eval instant at 20m min_over_time(rate(metric[5m])[20m:1m]) diff --git a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/trig_functions.test b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/trig_functions.test index fa5f94651b6..036621193d7 100644 --- a/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/trig_functions.test +++ b/vendor/github.com/prometheus/prometheus/promql/promqltest/testdata/trig_functions.test @@ -5,92 +5,92 @@ load 5m trig{l="y"} 20 trig{l="NaN"} NaN -eval instant at 5m sin(trig) +eval instant at 1m sin(trig) {l="x"} -0.5440211108893699 {l="y"} 0.9129452507276277 {l="NaN"} NaN -eval instant at 5m cos(trig) +eval instant at 1m cos(trig) {l="x"} -0.8390715290764524 {l="y"} 0.40808206181339196 {l="NaN"} NaN -eval instant at 5m tan(trig) +eval instant at 1m tan(trig) {l="x"} 0.6483608274590867 {l="y"} 2.2371609442247427 {l="NaN"} NaN -eval instant at 5m asin(trig - 10.1) +eval instant at 1m asin(trig - 10.1) {l="x"} -0.10016742116155944 {l="y"} NaN {l="NaN"} NaN -eval instant at 5m acos(trig - 10.1) +eval instant at 1m acos(trig - 10.1) {l="x"} 1.670963747956456 {l="y"} NaN {l="NaN"} NaN -eval instant at 5m atan(trig) +eval instant at 1m atan(trig) {l="x"} 1.4711276743037345 {l="y"} 1.5208379310729538 {l="NaN"} NaN -eval instant at 5m sinh(trig) +eval instant at 1m sinh(trig) {l="x"} 11013.232920103324 {l="y"} 2.4258259770489514e+08 {l="NaN"} NaN -eval instant at 5m cosh(trig) +eval instant at 1m cosh(trig) {l="x"} 11013.232920103324 {l="y"} 2.4258259770489514e+08 {l="NaN"} NaN -eval instant at 5m tanh(trig) +eval instant at 1m tanh(trig) {l="x"} 0.9999999958776927 {l="y"} 1 {l="NaN"} NaN -eval instant at 5m asinh(trig) +eval instant at 1m asinh(trig) {l="x"} 2.99822295029797 {l="y"} 3.6895038689889055 {l="NaN"} NaN -eval instant at 5m acosh(trig) +eval instant at 1m acosh(trig) {l="x"} 2.993222846126381 {l="y"} 3.6882538673612966 {l="NaN"} NaN -eval instant at 5m atanh(trig - 10.1) +eval instant at 1m atanh(trig - 10.1) {l="x"} -0.10033534773107522 {l="y"} NaN {l="NaN"} NaN -eval instant at 5m rad(trig) +eval instant at 1m rad(trig) {l="x"} 0.17453292519943295 {l="y"} 0.3490658503988659 {l="NaN"} NaN -eval instant at 5m rad(trig - 10) +eval 
instant at 1m rad(trig - 10) {l="x"} 0 {l="y"} 0.17453292519943295 {l="NaN"} NaN -eval instant at 5m rad(trig - 20) +eval instant at 1m rad(trig - 20) {l="x"} -0.17453292519943295 {l="y"} 0 {l="NaN"} NaN -eval instant at 5m deg(trig) +eval instant at 1m deg(trig) {l="x"} 572.9577951308232 {l="y"} 1145.9155902616465 {l="NaN"} NaN -eval instant at 5m deg(trig - 10) +eval instant at 1m deg(trig - 10) {l="x"} 0 {l="y"} 572.9577951308232 {l="NaN"} NaN -eval instant at 5m deg(trig - 20) +eval instant at 1m deg(trig - 20) {l="x"} -572.9577951308232 {l="y"} 0 {l="NaN"} NaN diff --git a/vendor/github.com/prometheus/prometheus/promql/quantile.go b/vendor/github.com/prometheus/prometheus/promql/quantile.go index 7ddb76acba7..06775d3ae67 100644 --- a/vendor/github.com/prometheus/prometheus/promql/quantile.go +++ b/vendor/github.com/prometheus/prometheus/promql/quantile.go @@ -153,19 +153,31 @@ func bucketQuantile(q float64, buckets buckets) (float64, bool, bool) { // histogramQuantile calculates the quantile 'q' based on the given histogram. // -// The quantile value is interpolated assuming a linear distribution within a -// bucket. -// TODO(beorn7): Find an interpolation method that is a better fit for -// exponential buckets (and think about configurable interpolation). +// For custom buckets, the result is interpolated linearly, i.e. it is assumed +// the observations are uniformly distributed within each bucket. (This is a +// quite blunt assumption, but it is consistent with the interpolation method +// used for classic histograms so far.) +// +// For exponential buckets, the interpolation is done under the assumption that +// the samples within each bucket are distributed in a way that they would +// uniformly populate the buckets in a hypothetical histogram with higher +// resolution. For example, if the rank calculation suggests that the requested +// quantile is right in the middle of the population of the (1,2] bucket, we +// assume the quantile would be right at the bucket boundary between the two +// buckets the (1,2] bucket would be divided into if the histogram had double +// the resolution, which is 2**2**-1 = 1.4142... We call this exponential +// interpolation. +// +// However, for a quantile that ends up in the zero bucket, this method isn't +// very helpful (because there is an infinite number of buckets close to zero, +// so we would have to assume zero as the result). Therefore, we return to +// linear interpolation in the zero bucket. // // A natural lower bound of 0 is assumed if the histogram has only positive // buckets. Likewise, a natural upper bound of 0 is assumed if the histogram has // only negative buckets. -// TODO(beorn7): Come to terms if we want that. // -// There are a number of special cases (once we have a way to report errors -// happening during evaluations of AST functions, we should report those -// explicitly): +// There are a number of special cases: // // If the histogram has 0 observations, NaN is returned. // @@ -193,9 +205,9 @@ func histogramQuantile(q float64, h *histogram.FloatHistogram) float64 { rank float64 ) - // if there are NaN observations in the histogram (h.Sum is NaN), use the forward iterator - // if the q < 0.5, use the forward iterator - // if the q >= 0.5, use the reverse iterator + // If there are NaN observations in the histogram (h.Sum is NaN), use the forward iterator. + // If q < 0.5, use the forward iterator. + // If q >= 0.5, use the reverse iterator. 
if math.IsNaN(h.Sum) || q < 0.5 { it = h.AllBucketIterator() rank = q * h.Count @@ -260,8 +272,29 @@ func histogramQuantile(q float64, h *histogram.FloatHistogram) float64 { rank = count - rank } - // TODO(codesome): Use a better estimation than linear. - return bucket.Lower + (bucket.Upper-bucket.Lower)*(rank/bucket.Count) + // The fraction of how far we are into the current bucket. + fraction := rank / bucket.Count + + // Return linear interpolation for custom buckets and for quantiles that + // end up in the zero bucket. + if h.UsesCustomBuckets() || (bucket.Lower <= 0 && bucket.Upper >= 0) { + return bucket.Lower + (bucket.Upper-bucket.Lower)*fraction + } + + // For exponential buckets, we interpolate on a logarithmic scale. On a + // logarithmic scale, the exponential bucket boundaries (for any schema) + // become linear (every bucket has the same width). Therefore, after + // taking the logarithm of both bucket boundaries, we can use the + // calculated fraction in the same way as for linear interpolation (see + // above). Finally, we return to the normal scale by applying the + // exponential function to the result. + logLower := math.Log2(math.Abs(bucket.Lower)) + logUpper := math.Log2(math.Abs(bucket.Upper)) + if bucket.Lower > 0 { // Positive bucket. + return math.Exp2(logLower + (logUpper-logLower)*fraction) + } + // Otherwise, we are in a negative bucket and have to mirror things. + return -math.Exp2(logUpper + (logLower-logUpper)*(1-fraction)) } // histogramFraction calculates the fraction of observations between the @@ -271,8 +304,8 @@ func histogramQuantile(q float64, h *histogram.FloatHistogram) float64 { // histogramQuantile(0.9, h) returns 123.4, then histogramFraction(-Inf, 123.4, h) // returns 0.9. // -// The same notes (and TODOs) with regard to interpolation and assumptions about -// the zero bucket boundaries apply as for histogramQuantile. +// The same notes with regard to interpolation and assumptions about the zero +// bucket boundaries apply as for histogramQuantile. // // Whether either boundary is inclusive or exclusive doesn’t actually matter as // long as interpolation has to be performed anyway. In the case of a boundary @@ -310,7 +343,35 @@ func histogramFraction(lower, upper float64, h *histogram.FloatHistogram) float6 ) for it.Next() { b := it.At() - if b.Lower < 0 && b.Upper > 0 { + zeroBucket := false + + // interpolateLinearly is used for custom buckets to be + // consistent with the linear interpolation known from classic + // histograms. It is also used for the zero bucket. + interpolateLinearly := func(v float64) float64 { + return rank + b.Count*(v-b.Lower)/(b.Upper-b.Lower) + } + + // interpolateExponentially is using the same exponential + // interpolation method as above for histogramQuantile. This + // method is a better fit for exponential bucketing. 
+ interpolateExponentially := func(v float64) float64 { + var ( + logLower = math.Log2(math.Abs(b.Lower)) + logUpper = math.Log2(math.Abs(b.Upper)) + logV = math.Log2(math.Abs(v)) + fraction float64 + ) + if v > 0 { + fraction = (logV - logLower) / (logUpper - logLower) + } else { + fraction = 1 - ((logV - logUpper) / (logLower - logUpper)) + } + return rank + b.Count*fraction + } + + if b.Lower <= 0 && b.Upper >= 0 { + zeroBucket = true switch { case len(h.NegativeBuckets) == 0 && len(h.PositiveBuckets) > 0: // This is the zero bucket and the histogram has only @@ -325,10 +386,12 @@ func histogramFraction(lower, upper float64, h *histogram.FloatHistogram) float6 } } if !lowerSet && b.Lower >= lower { + // We have hit the lower value at the lower bucket boundary. lowerRank = rank lowerSet = true } if !upperSet && b.Lower >= upper { + // We have hit the upper value at the lower bucket boundary. upperRank = rank upperSet = true } @@ -336,11 +399,21 @@ func histogramFraction(lower, upper float64, h *histogram.FloatHistogram) float6 break } if !lowerSet && b.Lower < lower && b.Upper > lower { - lowerRank = rank + b.Count*(lower-b.Lower)/(b.Upper-b.Lower) + // The lower value is in this bucket. + if h.UsesCustomBuckets() || zeroBucket { + lowerRank = interpolateLinearly(lower) + } else { + lowerRank = interpolateExponentially(lower) + } lowerSet = true } if !upperSet && b.Lower < upper && b.Upper > upper { - upperRank = rank + b.Count*(upper-b.Lower)/(b.Upper-b.Lower) + // The upper value is in this bucket. + if h.UsesCustomBuckets() || zeroBucket { + upperRank = interpolateLinearly(upper) + } else { + upperRank = interpolateExponentially(upper) + } upperSet = true } if lowerSet && upperSet { diff --git a/vendor/github.com/prometheus/prometheus/promql/query_logger.go b/vendor/github.com/prometheus/prometheus/promql/query_logger.go index 7e06ebb97fe..c0a70b66d77 100644 --- a/vendor/github.com/prometheus/prometheus/promql/query_logger.go +++ b/vendor/github.com/prometheus/prometheus/promql/query_logger.go @@ -19,6 +19,7 @@ import ( "errors" "fmt" "io" + "log/slog" "os" "path/filepath" "strings" @@ -26,14 +27,12 @@ import ( "unicode/utf8" "github.com/edsrzf/mmap-go" - "github.com/go-kit/log" - "github.com/go-kit/log/level" ) type ActiveQueryTracker struct { - mmapedFile []byte + mmappedFile []byte getNextIndex chan int - logger log.Logger + logger *slog.Logger closer io.Closer maxConcurrent int } @@ -63,11 +62,11 @@ func parseBrokenJSON(brokenJSON []byte) (string, bool) { return queries, true } -func logUnfinishedQueries(filename string, filesize int, logger log.Logger) { +func logUnfinishedQueries(filename string, filesize int, logger *slog.Logger) { if _, err := os.Stat(filename); err == nil { fd, err := os.Open(filename) if err != nil { - level.Error(logger).Log("msg", "Failed to open query log file", "err", err) + logger.Error("Failed to open query log file", "err", err) return } defer fd.Close() @@ -75,7 +74,7 @@ func logUnfinishedQueries(filename string, filesize int, logger log.Logger) { brokenJSON := make([]byte, filesize) _, err = fd.Read(brokenJSON) if err != nil { - level.Error(logger).Log("msg", "Failed to read query log file", "err", err) + logger.Error("Failed to read query log file", "err", err) return } @@ -83,72 +82,72 @@ func logUnfinishedQueries(filename string, filesize int, logger log.Logger) { if !queriesExist { return } - level.Info(logger).Log("msg", "These queries didn't finish in prometheus' last run:", "queries", queries) + logger.Info("These queries didn't finish in 
prometheus' last run:", "queries", queries) } } -type mmapedFile struct { +type mmappedFile struct { f io.Closer m mmap.MMap } -func (f *mmapedFile) Close() error { +func (f *mmappedFile) Close() error { err := f.m.Unmap() if err != nil { - err = fmt.Errorf("mmapedFile: unmapping: %w", err) + err = fmt.Errorf("mmappedFile: unmapping: %w", err) } if fErr := f.f.Close(); fErr != nil { - return errors.Join(fmt.Errorf("close mmapedFile.f: %w", fErr), err) + return errors.Join(fmt.Errorf("close mmappedFile.f: %w", fErr), err) } return err } -func getMMapedFile(filename string, filesize int, logger log.Logger) ([]byte, io.Closer, error) { +func getMMappedFile(filename string, filesize int, logger *slog.Logger) ([]byte, io.Closer, error) { file, err := os.OpenFile(filename, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0o666) if err != nil { absPath, pathErr := filepath.Abs(filename) if pathErr != nil { absPath = filename } - level.Error(logger).Log("msg", "Error opening query log file", "file", absPath, "err", err) + logger.Error("Error opening query log file", "file", absPath, "err", err) return nil, nil, err } err = file.Truncate(int64(filesize)) if err != nil { file.Close() - level.Error(logger).Log("msg", "Error setting filesize.", "filesize", filesize, "err", err) + logger.Error("Error setting filesize.", "filesize", filesize, "err", err) return nil, nil, err } fileAsBytes, err := mmap.Map(file, mmap.RDWR, 0) if err != nil { file.Close() - level.Error(logger).Log("msg", "Failed to mmap", "file", filename, "Attempted size", filesize, "err", err) + logger.Error("Failed to mmap", "file", filename, "Attempted size", filesize, "err", err) return nil, nil, err } - return fileAsBytes, &mmapedFile{f: file, m: fileAsBytes}, err + return fileAsBytes, &mmappedFile{f: file, m: fileAsBytes}, err } -func NewActiveQueryTracker(localStoragePath string, maxConcurrent int, logger log.Logger) *ActiveQueryTracker { +func NewActiveQueryTracker(localStoragePath string, maxConcurrent int, logger *slog.Logger) *ActiveQueryTracker { err := os.MkdirAll(localStoragePath, 0o777) if err != nil { - level.Error(logger).Log("msg", "Failed to create directory for logging active queries") + logger.Error("Failed to create directory for logging active queries") } filename, filesize := filepath.Join(localStoragePath, "queries.active"), 1+maxConcurrent*entrySize logUnfinishedQueries(filename, filesize, logger) - fileAsBytes, closer, err := getMMapedFile(filename, filesize, logger) + fileAsBytes, closer, err := getMMappedFile(filename, filesize, logger) if err != nil { panic("Unable to create mmap-ed active query log") } copy(fileAsBytes, "[") activeQueryTracker := ActiveQueryTracker{ - mmapedFile: fileAsBytes, + mmappedFile: fileAsBytes, closer: closer, getNextIndex: make(chan int, maxConcurrent), logger: logger, @@ -174,18 +173,18 @@ func trimStringByBytes(str string, size int) string { return string(bytesStr[:trimIndex]) } -func _newJSONEntry(query string, timestamp int64, logger log.Logger) []byte { +func _newJSONEntry(query string, timestamp int64, logger *slog.Logger) []byte { entry := Entry{query, timestamp} jsonEntry, err := json.Marshal(entry) if err != nil { - level.Error(logger).Log("msg", "Cannot create json of query", "query", query) + logger.Error("Cannot create json of query", "query", query) return []byte{} } return jsonEntry } -func newJSONEntry(query string, logger log.Logger) []byte { +func newJSONEntry(query string, logger *slog.Logger) []byte { timestamp := time.Now().Unix() minEntryJSON := _newJSONEntry("", timestamp, 
logger) @@ -206,14 +205,14 @@ func (tracker ActiveQueryTracker) GetMaxConcurrent() int { } func (tracker ActiveQueryTracker) Delete(insertIndex int) { - copy(tracker.mmapedFile[insertIndex:], strings.Repeat("\x00", entrySize)) + copy(tracker.mmappedFile[insertIndex:], strings.Repeat("\x00", entrySize)) tracker.getNextIndex <- insertIndex } func (tracker ActiveQueryTracker) Insert(ctx context.Context, query string) (int, error) { select { case i := <-tracker.getNextIndex: - fileBytes := tracker.mmapedFile + fileBytes := tracker.mmappedFile entry := newJSONEntry(query, tracker.logger) start, end := i, i+entrySize diff --git a/vendor/github.com/prometheus/prometheus/promql/value.go b/vendor/github.com/prometheus/prometheus/promql/value.go index f25dbcd7809..f19c0b5b582 100644 --- a/vendor/github.com/prometheus/prometheus/promql/value.go +++ b/vendor/github.com/prometheus/prometheus/promql/value.go @@ -526,7 +526,7 @@ func (ssi *storageSeriesIterator) Next() chunkenc.ValueType { ssi.currH = p.H return chunkenc.ValFloatHistogram default: - panic("storageSeriesIterater.Next failed to pick value type") + panic("storageSeriesIterator.Next failed to pick value type") } } diff --git a/vendor/github.com/prometheus/prometheus/rules/alerting.go b/vendor/github.com/prometheus/prometheus/rules/alerting.go index 038c49a6976..b94f3c5ff56 100644 --- a/vendor/github.com/prometheus/prometheus/rules/alerting.go +++ b/vendor/github.com/prometheus/prometheus/rules/alerting.go @@ -16,13 +16,12 @@ package rules import ( "context" "fmt" + "log/slog" "net/url" "strings" "sync" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/common/model" "go.uber.org/atomic" "gopkg.in/yaml.v2" @@ -141,7 +140,7 @@ type AlertingRule struct { // the fingerprint of the labelset they correspond to. 
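The logging change that recurs throughout these files replaces go-kit's leveled logger with the standard library's log/slog, so call sites turn from level.Error(logger).Log("msg", ..., "err", err) into logger.Error(..., "err", err). A minimal sketch of what callers now construct, assuming only the stdlib handlers and the promslog nop logger already referenced in this diff; the file/group values are invented:

package main

import (
	"log/slog"
	"os"

	"github.com/prometheus/common/promslog"
)

func main() {
	// A concrete *slog.Logger; the handler choice is left to the caller.
	logger := slog.New(slog.NewJSONHandler(os.Stderr, nil))
	logger.Error("Failed to open query log file", "err", os.ErrNotExist)

	// Contextual children replace go-kit's log.With / log.WithPrefix, as in
	// the rules group below: every record carries the attached attributes.
	ruleLogger := logger.With("file", "example.rules", "group", "example-group")
	ruleLogger.Warn("Evaluating rule failed", "err", "context canceled")

	// Components in this diff substitute a no-op slog logger when given nil.
	_ = promslog.NewNopLogger()
}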
active map[uint64]*Alert - logger log.Logger + logger *slog.Logger noDependentRules *atomic.Bool noDependencyRules *atomic.Bool @@ -151,7 +150,7 @@ type AlertingRule struct { func NewAlertingRule( name string, vec parser.Expr, hold, keepFiringFor time.Duration, labels, annotations, externalLabels labels.Labels, externalURL string, - restored bool, logger log.Logger, + restored bool, logger *slog.Logger, ) *AlertingRule { el := externalLabels.Map() @@ -381,7 +380,7 @@ func (r *AlertingRule) Eval(ctx context.Context, queryOffset time.Duration, ts t result, err := tmpl.Expand() if err != nil { result = fmt.Sprintf("<error expanding template: %s>", err) - level.Warn(r.logger).Log("msg", "Expanding alert template failed", "err", err, "data", tmplData) + r.logger.Warn("Expanding alert template failed", "err", err, "data", tmplData) } return result } diff --git a/vendor/github.com/prometheus/prometheus/rules/group.go b/vendor/github.com/prometheus/prometheus/rules/group.go index 539149da4c8..b6feb6f9625 100644 --- a/vendor/github.com/prometheus/prometheus/rules/group.go +++ b/vendor/github.com/prometheus/prometheus/rules/group.go @@ -16,9 +16,9 @@ package rules import ( "context" "errors" + "log/slog" "math" "slices" - "sort" "strings" "sync" "time" @@ -27,10 +27,9 @@ import ( "github.com/prometheus/prometheus/promql/parser" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" @@ -67,7 +66,7 @@ type Group struct { terminated chan struct{} managerDone chan struct{} - logger log.Logger + logger *slog.Logger metrics *Metrics @@ -76,8 +75,8 @@ type Group struct { evalIterationFunc GroupEvalIterationFunc // concurrencyController controls the rules evaluation concurrency. - concurrencyController RuleConcurrencyController - + concurrencyController RuleConcurrencyController + appOpts *storage.AppendOptions alignEvaluationTimeOnInterval bool } @@ -104,9 +103,13 @@ type GroupOptions struct { // NewGroup makes a new Group with the given name, options, and rules.
func NewGroup(o GroupOptions) *Group { - metrics := o.Opts.Metrics + opts := o.Opts + if opts == nil { + opts = &ManagerOptions{} + } + metrics := opts.Metrics if metrics == nil { - metrics = NewGroupMetrics(o.Opts.Registerer) + metrics = NewGroupMetrics(opts.Registerer) } key := GroupKey(o.File, o.Name) @@ -125,30 +128,34 @@ func NewGroup(o GroupOptions) *Group { evalIterationFunc = DefaultEvalIterationFunc } - concurrencyController := o.Opts.RuleConcurrencyController + concurrencyController := opts.RuleConcurrencyController if concurrencyController == nil { concurrencyController = sequentialRuleEvalController{} } - return &Group{ - name: o.Name, - file: o.File, - interval: o.Interval, - queryOffset: o.QueryOffset, - limit: o.Limit, - rules: o.Rules, - shouldRestore: o.ShouldRestore, - opts: o.Opts, - sourceTenants: o.SourceTenants, - seriesInPreviousEval: make([]map[string]labels.Labels, len(o.Rules)), - done: make(chan struct{}), - managerDone: o.done, - terminated: make(chan struct{}), - logger: log.With(o.Opts.Logger, "file", o.File, "group", o.Name), - metrics: metrics, - evalIterationFunc: evalIterationFunc, - concurrencyController: concurrencyController, + if opts.Logger == nil { + opts.Logger = promslog.NewNopLogger() + } + return &Group{ + name: o.Name, + file: o.File, + interval: o.Interval, + queryOffset: o.QueryOffset, + limit: o.Limit, + rules: o.Rules, + shouldRestore: o.ShouldRestore, + opts: opts, + sourceTenants: o.SourceTenants, + seriesInPreviousEval: make([]map[string]labels.Labels, len(o.Rules)), + done: make(chan struct{}), + managerDone: o.done, + terminated: make(chan struct{}), + logger: opts.Logger.With("file", o.File, "group", o.Name), + metrics: metrics, + evalIterationFunc: evalIterationFunc, + concurrencyController: concurrencyController, + appOpts: &storage.AppendOptions{DiscardOutOfOrder: true}, alignEvaluationTimeOnInterval: o.AlignEvaluationTimeOnInterval, } } @@ -197,7 +204,7 @@ func matchesMatcherSets(matcherSets [][]*labels.Matcher, lbls labels.Labels) boo return ok } -// Queryable returns the group's querable. +// Queryable returns the group's queryable. func (g *Group) Queryable() storage.Queryable { return g.opts.Queryable } // Context returns the group's context. @@ -213,7 +220,7 @@ func (g *Group) Limit() int { return g.limit } // If it's empty or nil, then the owning user/tenant is considered to be the source tenant. 
func (g *Group) SourceTenants() []string { return g.sourceTenants } -func (g *Group) Logger() log.Logger { return g.logger } +func (g *Group) Logger() *slog.Logger { return g.logger } func (g *Group) run(ctx context.Context) { defer close(g.terminated) @@ -285,7 +292,7 @@ func (g *Group) run(ctx context.Context) { g.RestoreForState(restoreStartTime) totalRestoreTimeSeconds := time.Since(restoreStartTime).Seconds() g.metrics.GroupLastRestoreDuration.WithLabelValues(GroupKey(g.file, g.name)).Set(totalRestoreTimeSeconds) - level.Debug(g.logger).Log("msg", "'for' state restoration completed", "duration_seconds", totalRestoreTimeSeconds) + g.logger.Debug("'for' state restoration completed", "duration_seconds", totalRestoreTimeSeconds) g.shouldRestore = false } @@ -514,7 +521,7 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { defer cleanup() } - logger := log.WithPrefix(g.logger, "name", rule.Name(), "index", i) + logger := g.logger.With("name", rule.Name(), "index", i) ctx, sp := otel.Tracer("").Start(ctx, "rule") sp.SetAttributes(attribute.String("name", rule.Name())) defer func(t time.Time) { @@ -527,7 +534,7 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { }(time.Now()) if sp.SpanContext().IsSampled() && sp.SpanContext().HasTraceID() { - logger = log.WithPrefix(logger, "trace_id", sp.SpanContext().TraceID()) + logger = logger.With("trace_id", sp.SpanContext().TraceID()) } g.metrics.EvalTotal.WithLabelValues(GroupKey(g.File(), g.Name())).Inc() @@ -543,7 +550,7 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { // happens on shutdown and thus we skip logging of any errors here. var eqc promql.ErrQueryCanceled if !errors.As(err, &eqc) { - level.Warn(logger).Log("msg", "Evaluating rule failed", "rule", rule, "err", err) + logger.Warn("Evaluating rule failed", "rule", rule, "err", err) } return } @@ -569,7 +576,7 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { sp.SetStatus(codes.Error, err.Error()) g.metrics.EvalFailures.WithLabelValues(GroupKey(g.File(), g.Name())).Inc() - level.Warn(logger).Log("msg", "Rule sample appending failed", "err", err) + logger.Warn("Rule sample appending failed", "err", err) return } g.seriesInPreviousEval[i] = seriesReturned @@ -579,6 +586,7 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { if s.H != nil { _, err = app.AppendHistogram(0, s.Metric, s.T, nil, s.H) } else { + app.SetOptions(g.appOpts) _, err = app.Append(0, s.Metric, s.T, s.F) } @@ -593,15 +601,15 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { switch { case errors.Is(unwrappedErr, storage.ErrOutOfOrderSample): numOutOfOrder++ - level.Warn(logger).Log("msg", "Rule evaluation result discarded", "err", err, "sample", s) + logger.Warn("Rule evaluation result discarded", "err", err, "sample", s) case errors.Is(unwrappedErr, storage.ErrTooOldSample): numTooOld++ - level.Warn(logger).Log("msg", "Rule evaluation result discarded", "err", err, "sample", s) + logger.Warn("Rule evaluation result discarded", "err", err, "sample", s) case errors.Is(unwrappedErr, storage.ErrDuplicateSampleForTimestamp): numDuplicates++ - level.Warn(logger).Log("msg", "Rule evaluation result discarded", "err", err, "sample", s) + logger.Warn("Rule evaluation result discarded", "err", err, "sample", s) default: - level.Warn(logger).Log("msg", "Rule evaluation result discarded", "err", err, "sample", s) + logger.Warn("Rule evaluation result discarded", "err", err, "sample", s) } } else { buf := [1024]byte{} @@ -609,13 +617,13 @@ func (g *Group) Eval(ctx 
context.Context, ts time.Time) { } } if numOutOfOrder > 0 { - level.Warn(logger).Log("msg", "Error on ingesting out-of-order result from rule evaluation", "num_dropped", numOutOfOrder) + logger.Warn("Error on ingesting out-of-order result from rule evaluation", "num_dropped", numOutOfOrder) } if numTooOld > 0 { - level.Warn(logger).Log("msg", "Error on ingesting too old result from rule evaluation", "num_dropped", numTooOld) + logger.Warn("Error on ingesting too old result from rule evaluation", "num_dropped", numTooOld) } if numDuplicates > 0 { - level.Warn(logger).Log("msg", "Error on ingesting results from rule evaluation with different value but same timestamp", "num_dropped", numDuplicates) + logger.Warn("Error on ingesting results from rule evaluation with different value but same timestamp", "num_dropped", numDuplicates) } for metric, lset := range g.seriesInPreviousEval[i] { @@ -634,7 +642,7 @@ func (g *Group) Eval(ctx context.Context, ts time.Time) { // Do not count these in logging, as this is expected if series // is exposed from a different rule. default: - level.Warn(logger).Log("msg", "Adding stale sample failed", "sample", lset.String(), "err", err) + logger.Warn("Adding stale sample failed", "sample", lset.String(), "err", err) } } } @@ -675,6 +683,7 @@ func (g *Group) cleanupStaleSeries(ctx context.Context, ts time.Time) { return } app := g.opts.Appendable.Appender(ctx) + app.SetOptions(g.appOpts) queryOffset := g.QueryOffset() for _, s := range g.staleSeries { // Rule that produced series no longer configured, mark it stale. @@ -691,11 +700,11 @@ func (g *Group) cleanupStaleSeries(ctx context.Context, ts time.Time) { // Do not count these in logging, as this is expected if series // is exposed from a different rule. default: - level.Warn(g.logger).Log("msg", "Adding stale sample for previous configuration failed", "sample", s, "err", err) + g.logger.Warn("Adding stale sample for previous configuration failed", "sample", s, "err", err) } } if err := app.Commit(); err != nil { - level.Warn(g.logger).Log("msg", "Stale sample appending for previous configuration failed", "err", err) + g.logger.Warn("Stale sample appending for previous configuration failed", "err", err) } else { g.staleSeries = nil } @@ -710,12 +719,12 @@ func (g *Group) RestoreForState(ts time.Time) { mintMS := int64(model.TimeFromUnixNano(mint.UnixNano())) q, err := g.opts.Queryable.Querier(mintMS, maxtMS) if err != nil { - level.Error(g.logger).Log("msg", "Failed to get Querier", "err", err) + g.logger.Error("Failed to get Querier", "err", err) return } defer func() { if err := q.Close(); err != nil { - level.Error(g.logger).Log("msg", "Failed to close Querier", "err", err) + g.logger.Error("Failed to close Querier", "err", err) } }() @@ -736,8 +745,8 @@ func (g *Group) RestoreForState(ts time.Time) { sset, err := alertRule.QueryForStateSeries(g.opts.Context, q) if err != nil { - level.Error(g.logger).Log( - "msg", "Failed to restore 'for' state", + g.logger.Error( + "Failed to restore 'for' state", labels.AlertName, alertRule.Name(), "stage", "Select", "err", err, @@ -756,7 +765,7 @@ func (g *Group) RestoreForState(ts time.Time) { // No results for this alert rule. 
if len(seriesByLabels) == 0 { - level.Debug(g.logger).Log("msg", "No series found to restore the 'for' state of the alert rule", labels.AlertName, alertRule.Name()) + g.logger.Debug("No series found to restore the 'for' state of the alert rule", labels.AlertName, alertRule.Name()) alertRule.SetRestored(true) continue } @@ -776,7 +785,7 @@ func (g *Group) RestoreForState(ts time.Time) { t, v = it.At() } if it.Err() != nil { - level.Error(g.logger).Log("msg", "Failed to restore 'for' state", + g.logger.Error("Failed to restore 'for' state", labels.AlertName, alertRule.Name(), "stage", "Iterator", "err", it.Err()) return } @@ -818,7 +827,7 @@ func (g *Group) RestoreForState(ts time.Time) { } a.ActiveAt = restoredActiveAt - level.Debug(g.logger).Log("msg", "'for' state restored", + g.logger.Debug("'for' state restored", labels.AlertName, alertRule.Name(), "restored_time", a.ActiveAt.Format(time.RFC850), "labels", a.Labels.String()) }) @@ -871,7 +880,7 @@ func (g *Group) Equals(ng *Group) bool { copyAndSort := func(x []string) []string { copied := make([]string, len(x)) copy(copied, x) - sort.Strings(copied) + slices.Sort(copied) return copied } diff --git a/vendor/github.com/prometheus/prometheus/rules/manager.go b/vendor/github.com/prometheus/prometheus/rules/manager.go index a2421a4d2d5..b5bb0151166 100644 --- a/vendor/github.com/prometheus/prometheus/rules/manager.go +++ b/vendor/github.com/prometheus/prometheus/rules/manager.go @@ -17,15 +17,15 @@ import ( "context" "errors" "fmt" + "log/slog" "net/url" "slices" "strings" "sync" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/promslog" "golang.org/x/sync/semaphore" "github.com/prometheus/prometheus/model/labels" @@ -96,7 +96,7 @@ type Manager struct { done chan struct{} restored bool - logger log.Logger + logger *slog.Logger } // NotifyFunc sends notifications about a set of alerts generated by the given expression. @@ -112,7 +112,7 @@ type ManagerOptions struct { Context context.Context Appendable storage.Appendable Queryable storage.Queryable - Logger log.Logger + Logger *slog.Logger Registerer prometheus.Registerer OutageTolerance time.Duration ForGracePeriod time.Duration @@ -158,6 +158,10 @@ func NewManager(o *ManagerOptions) *Manager { o.RuleDependencyController = ruleDependencyController{} } + if o.Logger == nil { + o.Logger = promslog.NewNopLogger() + } + m := &Manager{ groups: map[string]*Group{}, opts: o, @@ -171,7 +175,7 @@ func NewManager(o *ManagerOptions) *Manager { // Run starts processing of the rule manager. It is blocking. func (m *Manager) Run() { - level.Info(m.logger).Log("msg", "Starting rule manager...") + m.logger.Info("Starting rule manager...") m.start() <-m.done } @@ -185,7 +189,7 @@ func (m *Manager) Stop() { m.mtx.Lock() defer m.mtx.Unlock() - level.Info(m.logger).Log("msg", "Stopping rule manager...") + m.logger.Info("Stopping rule manager...") for _, eg := range m.groups { eg.stop() @@ -195,7 +199,7 @@ func (m *Manager) Stop() { // staleness markers. close(m.done) - level.Info(m.logger).Log("msg", "Rule manager stopped") + m.logger.Info("Rule manager stopped") } // Update the rule manager's state as the config requires. 
If @@ -216,7 +220,7 @@ func (m *Manager) Update(interval time.Duration, files []string, externalLabels if errs != nil { for _, e := range errs { - level.Error(m.logger).Log("msg", "loading groups failed", "err", e) + m.logger.Error("loading groups failed", "err", e) } return errors.New("error loading rules, previous rule set restored") } @@ -327,25 +331,27 @@ func (m *Manager) LoadGroups( return nil, []error{fmt.Errorf("%s: %w", fn, err)} } + mLabels := FromMaps(rg.Labels, r.Labels) + if r.Alert.Value != "" { rules = append(rules, NewAlertingRule( r.Alert.Value, expr, time.Duration(r.For), time.Duration(r.KeepFiringFor), - labels.FromMap(r.Labels), + mLabels, labels.FromMap(r.Annotations), externalLabels, externalURL, !shouldRestore, - log.With(m.logger, "alert", r.Alert), + m.logger.With("alert", r.Alert), )) continue } rules = append(rules, NewRecordingRule( r.Record.Value, expr, - labels.FromMap(r.Labels), + mLabels, )) } @@ -530,3 +536,16 @@ func (c sequentialRuleEvalController) Allow(_ context.Context, _ *Group, _ Rule) } func (c sequentialRuleEvalController) Done(_ context.Context) {} + +// FromMaps returns new sorted Labels from the given maps, overriding each other in order. +func FromMaps(maps ...map[string]string) labels.Labels { + mLables := make(map[string]string) + + for _, m := range maps { + for k, v := range m { + mLables[k] = v + } + } + + return labels.FromMap(mLables) +} diff --git a/vendor/github.com/prometheus/prometheus/scrape/manager.go b/vendor/github.com/prometheus/prometheus/scrape/manager.go index e3dba5f0eed..f3dad2a0488 100644 --- a/vendor/github.com/prometheus/prometheus/scrape/manager.go +++ b/vendor/github.com/prometheus/prometheus/scrape/manager.go @@ -17,31 +17,32 @@ import ( "errors" "fmt" "hash/fnv" + "log/slog" "reflect" "sync" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" config_util "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/util/logging" "github.com/prometheus/prometheus/util/osutil" "github.com/prometheus/prometheus/util/pool" ) // NewManager is the Manager constructor. 
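Stepping back to the rules changes above: the new FromMaps helper merges rule-group labels with per-rule labels, and later maps win on conflict, so a rule-level label overrides the group-level one. A small sketch of that precedence; the label names and values are invented for illustration:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/rules"
)

func main() {
	groupLabels := map[string]string{"team": "ops", "severity": "warning"}
	ruleLabels := map[string]string{"severity": "critical"}

	// Later maps win, so the rule-level severity overrides the group's.
	lset := rules.FromMaps(groupLabels, ruleLabels)
	fmt.Println(lset) // {severity="critical", team="ops"}
}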
-func NewManager(o *Options, logger log.Logger, app storage.Appendable, registerer prometheus.Registerer) (*Manager, error) { +func NewManager(o *Options, logger *slog.Logger, newScrapeFailureLogger func(string) (*logging.JSONFileLogger, error), app storage.Appendable, registerer prometheus.Registerer) (*Manager, error) { if o == nil { o = &Options{} } if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } sm, err := newScrapeMetrics(registerer) @@ -50,15 +51,16 @@ func NewManager(o *Options, logger log.Logger, app storage.Appendable, registere } m := &Manager{ - append: app, - opts: o, - logger: logger, - scrapeConfigs: make(map[string]*config.ScrapeConfig), - scrapePools: make(map[string]*scrapePool), - graceShut: make(chan struct{}), - triggerReload: make(chan struct{}, 1), - metrics: sm, - buffers: pool.New(1e3, 100e6, 3, func(sz int) interface{} { return make([]byte, 0, sz) }), + append: app, + opts: o, + logger: logger, + newScrapeFailureLogger: newScrapeFailureLogger, + scrapeConfigs: make(map[string]*config.ScrapeConfig), + scrapePools: make(map[string]*scrapePool), + graceShut: make(chan struct{}), + triggerReload: make(chan struct{}, 1), + metrics: sm, + buffers: pool.New(1e3, 100e6, 3, func(sz int) interface{} { return make([]byte, 0, sz) }), } m.metrics.setTargetMetadataCacheGatherer(m) @@ -68,8 +70,7 @@ func NewManager(o *Options, logger log.Logger, app storage.Appendable, registere // Options are the configuration parameters to the scrape manager. type Options struct { - ExtraMetrics bool - NoDefaultPort bool + ExtraMetrics bool // Option used by downstream scraper users like OpenTelemetry Collector // to help lookup metric metadata. Should be false for Prometheus. PassMetadataInContext bool @@ -99,16 +100,18 @@ const DefaultNameEscapingScheme = model.ValueEncodingEscaping // when receiving new target groups from the discovery manager. type Manager struct { opts *Options - logger log.Logger + logger *slog.Logger append storage.Appendable graceShut chan struct{} - offsetSeed uint64 // Global offsetSeed seed is used to spread scrape workload across HA setup. - mtxScrape sync.Mutex // Guards the fields below. - scrapeConfigs map[string]*config.ScrapeConfig - scrapePools map[string]*scrapePool - targetSets map[string][]*targetgroup.Group - buffers *pool.Pool + offsetSeed uint64 // Global offsetSeed seed is used to spread scrape workload across HA setup. + mtxScrape sync.Mutex // Guards the fields below. 
+ scrapeConfigs map[string]*config.ScrapeConfig + scrapePools map[string]*scrapePool + newScrapeFailureLogger func(string) (*logging.JSONFileLogger, error) + scrapeFailureLoggers map[string]*logging.JSONFileLogger + targetSets map[string][]*targetgroup.Group + buffers *pool.Pool triggerReload chan struct{} @@ -172,17 +175,27 @@ func (m *Manager) reload() { if _, ok := m.scrapePools[setName]; !ok { scrapeConfig, ok := m.scrapeConfigs[setName] if !ok { - level.Error(m.logger).Log("msg", "error reloading target set", "err", "invalid config id:"+setName) + m.logger.Error("error reloading target set", "err", "invalid config id:"+setName) + continue + } + if scrapeConfig.ConvertClassicHistogramsToNHCB && m.opts.EnableCreatedTimestampZeroIngestion { + // TODO(krajorama): fix https://github.com/prometheus/prometheus/issues/15137 + m.logger.Error("error reloading target set", "err", "cannot convert classic histograms to native histograms with custom buckets and ingest created timestamp zero samples at the same time due to https://github.com/prometheus/prometheus/issues/15137") continue } m.metrics.targetScrapePools.Inc() - sp, err := newScrapePool(scrapeConfig, m.append, m.offsetSeed, log.With(m.logger, "scrape_pool", setName), m.buffers, m.opts, m.metrics) + sp, err := newScrapePool(scrapeConfig, m.append, m.offsetSeed, m.logger.With("scrape_pool", setName), m.buffers, m.opts, m.metrics) if err != nil { m.metrics.targetScrapePoolsFailed.Inc() - level.Error(m.logger).Log("msg", "error creating new scrape pool", "err", err, "scrape_pool", setName) + m.logger.Error("error creating new scrape pool", "err", err, "scrape_pool", setName) continue } m.scrapePools[setName] = sp + if l, ok := m.scrapeFailureLoggers[scrapeConfig.ScrapeFailureLogFile]; ok { + sp.SetScrapeFailureLogger(l) + } else { + sp.logger.Error("No logger found. This is a bug in Prometheus that should be reported upstream.", "scrape_pool", setName) + } } wg.Add(1) @@ -238,11 +251,36 @@ func (m *Manager) ApplyConfig(cfg *config.Config) error { } c := make(map[string]*config.ScrapeConfig) + scrapeFailureLoggers := map[string]*logging.JSONFileLogger{ + "": nil, // Emptying the file name sets the scrape logger to nil. + } for _, scfg := range scfgs { c[scfg.JobName] = scfg + if _, ok := scrapeFailureLoggers[scfg.ScrapeFailureLogFile]; !ok { + // We promise to reopen the file on each reload. + var ( + logger *logging.JSONFileLogger + err error + ) + if m.newScrapeFailureLogger != nil { + if logger, err = m.newScrapeFailureLogger(scfg.ScrapeFailureLogFile); err != nil { + return err + } + } + scrapeFailureLoggers[scfg.ScrapeFailureLogFile] = logger + } } m.scrapeConfigs = c + oldScrapeFailureLoggers := m.scrapeFailureLoggers + for _, s := range oldScrapeFailureLoggers { + if s != nil { + defer s.Close() + } + } + + m.scrapeFailureLoggers = scrapeFailureLoggers + if err := m.setOffsetSeed(cfg.GlobalConfig.ExternalLabels); err != nil { return err } @@ -257,9 +295,16 @@ func (m *Manager) ApplyConfig(cfg *config.Config) error { case !reflect.DeepEqual(sp.config, cfg): err := sp.reload(cfg) if err != nil { - level.Error(m.logger).Log("msg", "error reloading scrape pool", "err", err, "scrape_pool", name) + m.logger.Error("error reloading scrape pool", "err", err, "scrape_pool", name) failed = true } + fallthrough + case ok: + if l, ok := m.scrapeFailureLoggers[cfg.ScrapeFailureLogFile]; ok { + sp.SetScrapeFailureLogger(l) + } else { + sp.logger.Error("No logger found. 
This is a bug in Prometheus that should be reported upstream.", "scrape_pool", name) + } } } diff --git a/vendor/github.com/prometheus/prometheus/scrape/scrape.go b/vendor/github.com/prometheus/prometheus/scrape/scrape.go index 2abd4691d6d..7e270bb3a3c 100644 --- a/vendor/github.com/prometheus/prometheus/scrape/scrape.go +++ b/vendor/github.com/prometheus/prometheus/scrape/scrape.go @@ -20,6 +20,7 @@ import ( "errors" "fmt" "io" + "log/slog" "math" "net/http" "reflect" @@ -29,11 +30,10 @@ import ( "sync" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/klauspost/compress/gzip" config_util "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/common/version" "github.com/prometheus/prometheus/config" @@ -47,6 +47,7 @@ import ( "github.com/prometheus/prometheus/model/timestamp" "github.com/prometheus/prometheus/model/value" "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/util/logging" "github.com/prometheus/prometheus/util/pool" ) @@ -63,7 +64,7 @@ var errNameLabelMandatory = fmt.Errorf("missing metric name (%s label)", labels. // scrapePool manages scrapes for sets of targets. type scrapePool struct { appendable storage.Appendable - logger log.Logger + logger *slog.Logger cancel context.CancelFunc httpOpts []config_util.HTTPClientOption @@ -87,9 +88,10 @@ type scrapePool struct { // Constructor for new scrape loops. This is settable for testing convenience. newLoop func(scrapeLoopOptions) loop - noDefaultPort bool - metrics *scrapeMetrics + + scrapeFailureLogger *logging.JSONFileLogger + scrapeFailureLoggerMtx sync.RWMutex } type labelLimits struct { @@ -110,8 +112,10 @@ type scrapeLoopOptions struct { trackTimestampsStaleness bool interval time.Duration timeout time.Duration - scrapeClassicHistograms bool + alwaysScrapeClassicHist bool + convertClassicHistToNHCB bool validationScheme model.ValidationScheme + fallbackScrapeProtocol string mrc []*relabel.Config cache *scrapeCache @@ -123,9 +127,9 @@ const maxAheadTime = 10 * time.Minute // returning an empty label set is interpreted as "drop". type labelsMutator func(labels.Labels) labels.Labels -func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed uint64, logger log.Logger, buffers *pool.Pool, options *Options, metrics *scrapeMetrics) (*scrapePool, error) { +func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed uint64, logger *slog.Logger, buffers *pool.Pool, options *Options, metrics *scrapeMetrics) (*scrapePool, error) { if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, cfg.JobName, options.HTTPClientOptions...) @@ -146,7 +150,6 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed logger: logger, metrics: metrics, httpOpts: options.HTTPClientOptions, - noDefaultPort: options.NoDefaultPort, } sp.newLoop = func(opts scrapeLoopOptions) loop { // Update the targets retrieval function for metadata to a new scrape cache. 
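As an aside on the scrape Manager constructor change above: callers of scrape.NewManager now supply a factory for per-job scrape-failure loggers as an extra argument, and passing nil simply disables the feature, since ApplyConfig only opens failure logs when the factory is non-nil. A rough sketch of the new call shape against the vendored fork shown in this diff; the helper name and handler choice are illustrative:

package example

import (
	"log/slog"
	"os"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/prometheus/scrape"
	"github.com/prometheus/prometheus/storage"
)

// newScrapeManager shows the updated constructor shape; the nil factory
// means scrape_failure_log_file settings are effectively ignored.
func newScrapeManager(app storage.Appendable) (*scrape.Manager, error) {
	logger := slog.New(slog.NewTextHandler(os.Stderr, nil))
	return scrape.NewManager(&scrape.Options{}, logger, nil, app, prometheus.NewRegistry())
}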
@@ -159,7 +162,7 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed return newScrapeLoop( ctx, opts.scraper, - log.With(logger, "target", opts.target), + logger.With("target", opts.target), buffers, func(l labels.Labels) labels.Labels { return mutateSampleLabels(l, opts.target, opts.honorLabels, opts.mrc) @@ -178,7 +181,8 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed opts.labelLimits, opts.interval, opts.timeout, - opts.scrapeClassicHistograms, + opts.alwaysScrapeClassicHist, + opts.convertClassicHistToNHCB, options.EnableNativeHistogramsIngestion, options.EnableCreatedTimestampZeroIngestion, options.ExtraMetrics, @@ -188,6 +192,7 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed metrics, options.skipOffsetting, opts.validationScheme, + opts.fallbackScrapeProtocol, ) } sp.metrics.targetScrapePoolTargetLimit.WithLabelValues(sp.config.JobName).Set(float64(sp.config.TargetLimit)) @@ -218,6 +223,27 @@ func (sp *scrapePool) DroppedTargetsCount() int { return sp.droppedTargetsCount } +func (sp *scrapePool) SetScrapeFailureLogger(l *logging.JSONFileLogger) { + sp.scrapeFailureLoggerMtx.Lock() + defer sp.scrapeFailureLoggerMtx.Unlock() + if l != nil { + l.With("job_name", sp.config.JobName) + } + sp.scrapeFailureLogger = l + + sp.targetMtx.Lock() + defer sp.targetMtx.Unlock() + for _, s := range sp.loops { + s.setScrapeFailureLogger(sp.scrapeFailureLogger) + } +} + +func (sp *scrapePool) getScrapeFailureLogger() *logging.JSONFileLogger { + sp.scrapeFailureLoggerMtx.RLock() + defer sp.scrapeFailureLoggerMtx.RUnlock() + return sp.scrapeFailureLogger +} + // stop terminates all scrape loops and returns after they all terminated. func (sp *scrapePool) stop() { sp.mtx.Lock() @@ -303,11 +329,12 @@ func (sp *scrapePool) restartLoops(reuseCache bool) { enableCompression = sp.config.EnableCompression trackTimestampsStaleness = sp.config.TrackTimestampsStaleness mrc = sp.config.MetricRelabelConfigs + fallbackScrapeProtocol = sp.config.ScrapeFallbackProtocol.HeaderMediaType() ) - validationScheme := model.LegacyValidation - if sp.config.MetricNameValidationScheme == config.UTF8ValidationConfig { - validationScheme = model.UTF8Validation + validationScheme := model.UTF8Validation + if sp.config.MetricNameValidationScheme == config.LegacyValidationConfig { + validationScheme = model.LegacyValidation } sp.targetMtx.Lock() @@ -349,6 +376,7 @@ func (sp *scrapePool) restartLoops(reuseCache bool) { interval: interval, timeout: timeout, validationScheme: validationScheme, + fallbackScrapeProtocol: fallbackScrapeProtocol, }) ) if err != nil { @@ -361,6 +389,7 @@ func (sp *scrapePool) restartLoops(reuseCache bool) { wg.Done() newLoop.setForcedError(forcedErr) + newLoop.setScrapeFailureLogger(sp.getScrapeFailureLogger()) newLoop.run(nil) }(oldLoop, newLoop) @@ -404,9 +433,9 @@ func (sp *scrapePool) Sync(tgs []*targetgroup.Group) { sp.droppedTargets = []*Target{} sp.droppedTargetsCount = 0 for _, tg := range tgs { - targets, failures := TargetsFromGroup(tg, sp.config, sp.noDefaultPort, targets, lb) + targets, failures := TargetsFromGroup(tg, sp.config, targets, lb) for _, err := range failures { - level.Error(sp.logger).Log("msg", "Creating target failed", "err", err) + sp.logger.Error("Creating target failed", "err", err) } sp.metrics.targetSyncFailed.WithLabelValues(sp.config.JobName).Add(float64(len(failures))) for _, t := range targets { @@ -457,12 +486,14 @@ func (sp *scrapePool) sync(targets []*Target) { 
enableCompression = sp.config.EnableCompression trackTimestampsStaleness = sp.config.TrackTimestampsStaleness mrc = sp.config.MetricRelabelConfigs - scrapeClassicHistograms = sp.config.ScrapeClassicHistograms + fallbackScrapeProtocol = sp.config.ScrapeFallbackProtocol.HeaderMediaType() + alwaysScrapeClassicHist = sp.config.AlwaysScrapeClassicHistograms + convertClassicHistToNHCB = sp.config.ConvertClassicHistogramsToNHCB ) - validationScheme := model.LegacyValidation - if sp.config.MetricNameValidationScheme == config.UTF8ValidationConfig { - validationScheme = model.UTF8Validation + validationScheme := model.UTF8Validation + if sp.config.MetricNameValidationScheme == config.LegacyValidationConfig { + validationScheme = model.LegacyValidation } sp.targetMtx.Lock() @@ -498,11 +529,15 @@ func (sp *scrapePool) sync(targets []*Target) { mrc: mrc, interval: interval, timeout: timeout, - scrapeClassicHistograms: scrapeClassicHistograms, + alwaysScrapeClassicHist: alwaysScrapeClassicHist, + convertClassicHistToNHCB: convertClassicHistToNHCB, + validationScheme: validationScheme, + fallbackScrapeProtocol: fallbackScrapeProtocol, }) if err != nil { l.setForcedError(err) } + l.setScrapeFailureLogger(sp.scrapeFailureLogger) sp.activeTargets[hash] = t sp.loops[hash] = l @@ -825,6 +860,7 @@ func (s *targetScraper) readResponse(ctx context.Context, resp *http.Response, w type loop interface { run(errc chan<- error) setForcedError(err error) + setScrapeFailureLogger(*logging.JSONFileLogger) stop() getCache() *scrapeCache disableEndOfRunStalenessMarkers() @@ -839,7 +875,9 @@ type cacheEntry struct { type scrapeLoop struct { scraper scraper - l log.Logger + l *slog.Logger + scrapeFailureLogger *logging.JSONFileLogger + scrapeFailureLoggerMtx sync.RWMutex cache *scrapeCache lastScrapeSize int buffers *pool.Pool @@ -855,8 +893,10 @@ type scrapeLoop struct { labelLimits *labelLimits interval time.Duration timeout time.Duration - scrapeClassicHistograms bool + alwaysScrapeClassicHist bool + convertClassicHistToNHCB bool validationScheme model.ValidationScheme + fallbackScrapeProtocol string // Feature flagged options. 
enableNativeHistogramIngestion bool @@ -1138,7 +1178,7 @@ func (c *scrapeCache) LengthMetadata() int { func newScrapeLoop(ctx context.Context, sc scraper, - l log.Logger, + l *slog.Logger, buffers *pool.Pool, sampleMutator labelsMutator, reportSampleMutator labelsMutator, @@ -1155,7 +1195,8 @@ func newScrapeLoop(ctx context.Context, labelLimits *labelLimits, interval time.Duration, timeout time.Duration, - scrapeClassicHistograms bool, + alwaysScrapeClassicHist bool, + convertClassicHistToNHCB bool, enableNativeHistogramIngestion bool, enableCTZeroIngestion bool, reportExtraMetrics bool, @@ -1165,9 +1206,10 @@ func newScrapeLoop(ctx context.Context, metrics *scrapeMetrics, skipOffsetting bool, validationScheme model.ValidationScheme, + fallbackScrapeProtocol string, ) *scrapeLoop { if l == nil { - l = log.NewNopLogger() + l = promslog.NewNopLogger() } if buffers == nil { buffers = pool.New(1e3, 1e6, 3, func(sz int) interface{} { return make([]byte, 0, sz) }) @@ -1209,7 +1251,8 @@ func newScrapeLoop(ctx context.Context, labelLimits: labelLimits, interval: interval, timeout: timeout, - scrapeClassicHistograms: scrapeClassicHistograms, + alwaysScrapeClassicHist: alwaysScrapeClassicHist, + convertClassicHistToNHCB: convertClassicHistToNHCB, enableNativeHistogramIngestion: enableNativeHistogramIngestion, enableCTZeroIngestion: enableCTZeroIngestion, reportExtraMetrics: reportExtraMetrics, @@ -1217,12 +1260,22 @@ func newScrapeLoop(ctx context.Context, metrics: metrics, skipOffsetting: skipOffsetting, validationScheme: validationScheme, + fallbackScrapeProtocol: fallbackScrapeProtocol, } sl.ctx, sl.cancel = context.WithCancel(ctx) return sl } +func (sl *scrapeLoop) setScrapeFailureLogger(l *logging.JSONFileLogger) { + sl.scrapeFailureLoggerMtx.Lock() + defer sl.scrapeFailureLoggerMtx.Unlock() + if ts, ok := sl.scraper.(fmt.Stringer); ok && l != nil { + l.With("target", ts.String()) + } + sl.scrapeFailureLogger = l +} + func (sl *scrapeLoop) run(errc chan<- error) { if !sl.skipOffsetting { select { @@ -1316,13 +1369,13 @@ func (sl *scrapeLoop) scrapeAndReport(last, appendTime time.Time, errc chan<- er } err = app.Commit() if err != nil { - level.Error(sl.l).Log("msg", "Scrape commit failed", "err", err) + sl.l.Error("Scrape commit failed", "err", err) } }() defer func() { if err = sl.report(app, appendTime, time.Since(start), total, added, seriesAdded, bytesRead, scrapeErr); err != nil { - level.Warn(sl.l).Log("msg", "Appending scrape report failed", "err", err) + sl.l.Warn("Appending scrape report failed", "err", err) } }() @@ -1332,7 +1385,7 @@ func (sl *scrapeLoop) scrapeAndReport(last, appendTime time.Time, errc chan<- er if _, _, _, err := sl.append(app, []byte{}, "", appendTime); err != nil { app.Rollback() app = sl.appender(sl.appenderCtx) - level.Warn(sl.l).Log("msg", "Append failed", "err", err) + sl.l.Warn("Append failed", "err", err) } if errc != nil { errc <- forcedErr @@ -1365,7 +1418,12 @@ func (sl *scrapeLoop) scrapeAndReport(last, appendTime time.Time, errc chan<- er } bytesRead = len(b) } else { - level.Debug(sl.l).Log("msg", "Scrape failed", "err", scrapeErr) + sl.l.Debug("Scrape failed", "err", scrapeErr) + sl.scrapeFailureLoggerMtx.RLock() + if sl.scrapeFailureLogger != nil { + sl.scrapeFailureLogger.Error("err", scrapeErr) + } + sl.scrapeFailureLoggerMtx.RUnlock() if errc != nil { errc <- scrapeErr } @@ -1380,13 +1438,13 @@ func (sl *scrapeLoop) scrapeAndReport(last, appendTime time.Time, errc chan<- er if appErr != nil { app.Rollback() app = sl.appender(sl.appenderCtx) - 
level.Debug(sl.l).Log("msg", "Append failed", "err", appErr) + sl.l.Debug("Append failed", "err", appErr) // The append failed, probably due to a parse error or sample limit. // Call sl.append again with an empty scrape to trigger stale markers. if _, _, _, err := sl.append(app, []byte{}, "", appendTime); err != nil { app.Rollback() app = sl.appender(sl.appenderCtx) - level.Warn(sl.l).Log("msg", "Append failed", "err", err) + sl.l.Warn("Append failed", "err", err) } } @@ -1459,16 +1517,16 @@ func (sl *scrapeLoop) endOfRunStaleness(last time.Time, ticker *time.Ticker, int } err = app.Commit() if err != nil { - level.Warn(sl.l).Log("msg", "Stale commit failed", "err", err) + sl.l.Warn("Stale commit failed", "err", err) } }() if _, _, _, err = sl.append(app, []byte{}, "", staleTime); err != nil { app.Rollback() app = sl.appender(sl.appenderCtx) - level.Warn(sl.l).Log("msg", "Stale append failed", "err", err) + sl.l.Warn("Stale append failed", "err", err) } if err = sl.reportStale(app, staleTime); err != nil { - level.Warn(sl.l).Log("msg", "Stale report failed", "err", err) + sl.l.Warn("Stale report failed", "err", err) } } @@ -1495,11 +1553,24 @@ type appendErrors struct { } func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string, ts time.Time) (total, added, seriesAdded int, err error) { - p, err := textparse.New(b, contentType, sl.scrapeClassicHistograms, sl.symbolTable) + p, err := textparse.New(b, contentType, sl.fallbackScrapeProtocol, sl.alwaysScrapeClassicHist, sl.enableCTZeroIngestion, sl.symbolTable) + if p == nil { + sl.l.Error( + "Failed to determine correct type of scrape target.", + "content_type", contentType, + "fallback_media_type", sl.fallbackScrapeProtocol, + "err", err, + ) + return + } + if sl.convertClassicHistToNHCB { + p = textparse.NewNHCBParser(p, sl.symbolTable, sl.alwaysScrapeClassicHist) + } if err != nil { - level.Debug(sl.l).Log( - "msg", "Invalid content type on scrape, using prometheus parser as fallback.", + sl.l.Debug( + "Invalid content type on scrape, using fallback setting.", "content_type", contentType, + "fallback_media_type", sl.fallbackScrapeProtocol, "err", err, ) } @@ -1515,7 +1586,7 @@ func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string, metadataChanged bool ) - exemplars := make([]exemplar.Exemplar, 1) + exemplars := make([]exemplar.Exemplar, 0, 1) // updateMetadata updates the current iteration's metadata object and the // metadataChanged value if we have metadata in the scrape cache AND the @@ -1657,11 +1728,19 @@ loop: } else { if sl.enableCTZeroIngestion { if ctMs := p.CreatedTimestamp(); ctMs != nil { - ref, err = app.AppendCTZeroSample(ref, lset, t, *ctMs) + if isHistogram && sl.enableNativeHistogramIngestion { + if h != nil { + ref, err = app.AppendHistogramCTZeroSample(ref, lset, t, *ctMs, h, nil) + } else { + ref, err = app.AppendHistogramCTZeroSample(ref, lset, t, *ctMs, nil, fh) + } + } else { + ref, err = app.AppendCTZeroSample(ref, lset, t, *ctMs) + } if err != nil && !errors.Is(err, storage.ErrOutOfOrderCT) { // OOO is a common case, ignoring completely for now. // CT is an experimental feature. For now, we don't need to fail the // scrape on errors updating the created timestamp, log debug. 
- level.Debug(sl.l).Log("msg", "Error when appending CT in scrape loop", "series", string(met), "ct", *ctMs, "t", t, "err", err) + sl.l.Debug("Error when appending CT in scrape loop", "series", string(met), "ct", *ctMs, "t", t, "err", err) } } } @@ -1686,7 +1765,7 @@ loop: sampleAdded, err = sl.checkAddError(met, err, &sampleLimitErr, &bucketLimitErr, &appErrs) if err != nil { if !errors.Is(err, storage.ErrNotFound) { - level.Debug(sl.l).Log("msg", "Unexpected error", "series", string(met), "err", err) + sl.l.Debug("Unexpected error", "series", string(met), "err", err) } break loop } @@ -1738,21 +1817,21 @@ loop: outOfOrderExemplars++ default: // Since exemplar storage is still experimental, we don't fail the scrape on ingestion errors. - level.Debug(sl.l).Log("msg", "Error while adding exemplar in AddExemplar", "exemplar", fmt.Sprintf("%+v", e), "err", exemplarErr) + sl.l.Debug("Error while adding exemplar in AddExemplar", "exemplar", fmt.Sprintf("%+v", e), "err", exemplarErr) } } if outOfOrderExemplars > 0 && outOfOrderExemplars == len(exemplars) { // Only report out of order exemplars if all are out of order, otherwise this was a partial update // to some existing set of exemplars. appErrs.numExemplarOutOfOrder += outOfOrderExemplars - level.Debug(sl.l).Log("msg", "Out of order exemplars", "count", outOfOrderExemplars, "latest", fmt.Sprintf("%+v", exemplars[len(exemplars)-1])) + sl.l.Debug("Out of order exemplars", "count", outOfOrderExemplars, "latest", fmt.Sprintf("%+v", exemplars[len(exemplars)-1])) sl.metrics.targetScrapeExemplarOutOfOrder.Add(float64(outOfOrderExemplars)) } if sl.appendMetadataToWAL && metadataChanged { if _, merr := app.UpdateMetadata(ref, lset, meta); merr != nil { // No need to fail the scrape on errors appending metadata. - level.Debug(sl.l).Log("msg", "Error when appending metadata in scrape loop", "ref", fmt.Sprintf("%d", ref), "metadata", fmt.Sprintf("%+v", meta), "err", merr) + sl.l.Debug("Error when appending metadata in scrape loop", "ref", fmt.Sprintf("%d", ref), "metadata", fmt.Sprintf("%+v", meta), "err", merr) } } } @@ -1771,21 +1850,23 @@ loop: sl.metrics.targetScrapeNativeHistogramBucketLimit.Inc() } if appErrs.numOutOfOrder > 0 { - level.Warn(sl.l).Log("msg", "Error on ingesting out-of-order samples", "num_dropped", appErrs.numOutOfOrder) + sl.l.Warn("Error on ingesting out-of-order samples", "num_dropped", appErrs.numOutOfOrder) } if appErrs.numDuplicates > 0 { - level.Warn(sl.l).Log("msg", "Error on ingesting samples with different value but same timestamp", "num_dropped", appErrs.numDuplicates) + sl.l.Warn("Error on ingesting samples with different value but same timestamp", "num_dropped", appErrs.numDuplicates) } if appErrs.numOutOfBounds > 0 { - level.Warn(sl.l).Log("msg", "Error on ingesting samples that are too old or are too far into the future", "num_dropped", appErrs.numOutOfBounds) + sl.l.Warn("Error on ingesting samples that are too old or are too far into the future", "num_dropped", appErrs.numOutOfBounds) } if appErrs.numExemplarOutOfOrder > 0 { - level.Warn(sl.l).Log("msg", "Error on ingesting out-of-order exemplars", "num_dropped", appErrs.numExemplarOutOfOrder) + sl.l.Warn("Error on ingesting out-of-order exemplars", "num_dropped", appErrs.numExemplarOutOfOrder) } if err == nil { sl.cache.forEachStale(func(lset labels.Labels) bool { // Series no longer exposed, mark it stale. 
+ app.SetOptions(&storage.AppendOptions{DiscardOutOfOrder: true}) _, err = app.Append(0, lset, defTime, math.Float64frombits(value.StaleNaN)) + app.SetOptions(nil) switch { case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrDuplicateSampleForTimestamp): // Do not count these in logging, as this is expected if a target @@ -1808,17 +1889,17 @@ func (sl *scrapeLoop) checkAddError(met []byte, err error, sampleLimitErr, bucke return false, storage.ErrNotFound case errors.Is(err, storage.ErrOutOfOrderSample): appErrs.numOutOfOrder++ - level.Debug(sl.l).Log("msg", "Out of order sample", "series", string(met)) + sl.l.Debug("Out of order sample", "series", string(met)) sl.metrics.targetScrapeSampleOutOfOrder.Inc() return false, nil case errors.Is(err, storage.ErrDuplicateSampleForTimestamp): appErrs.numDuplicates++ - level.Debug(sl.l).Log("msg", "Duplicate sample for timestamp", "series", string(met)) + sl.l.Debug("Duplicate sample for timestamp", "series", string(met)) sl.metrics.targetScrapeSampleDuplicate.Inc() return false, nil case errors.Is(err, storage.ErrOutOfBounds): appErrs.numOutOfBounds++ - level.Debug(sl.l).Log("msg", "Out of bounds metric", "series", string(met)) + sl.l.Debug("Out of bounds metric", "series", string(met)) sl.metrics.targetScrapeSampleOutOfBounds.Inc() return false, nil case errors.Is(err, errSampleLimit): @@ -1891,7 +1972,7 @@ func (sl *scrapeLoop) report(app storage.Appender, start time.Time, duration tim func (sl *scrapeLoop) reportStale(app storage.Appender, start time.Time) (err error) { ts := timestamp.FromTime(start) - + app.SetOptions(&storage.AppendOptions{DiscardOutOfOrder: true}) stale := math.Float64frombits(value.StaleNaN) b := labels.NewBuilder(labels.EmptyLabels()) diff --git a/vendor/github.com/prometheus/prometheus/scrape/target.go b/vendor/github.com/prometheus/prometheus/scrape/target.go index 9ef4471fbd1..06d4737ff90 100644 --- a/vendor/github.com/prometheus/prometheus/scrape/target.go +++ b/vendor/github.com/prometheus/prometheus/scrape/target.go @@ -17,7 +17,6 @@ import ( "errors" "fmt" "hash/fnv" - "net" "net/url" "strings" "sync" @@ -424,7 +423,7 @@ func (app *maxSchemaAppender) AppendHistogram(ref storage.SeriesRef, lset labels // PopulateLabels builds a label set from the given label set and scrape configuration. // It returns a label set before relabeling was applied as the second return value. // Returns the original discovered label set found before relabelling was applied if the target is dropped during relabeling. -func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig, noDefaultPort bool) (res, orig labels.Labels, err error) { +func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig) (res, orig labels.Labels, err error) { // Copy labels into the labelset for the target if they are not set already. scrapeLabels := []labels.Label{ {Name: model.JobLabel, Value: cfg.JobName}, @@ -441,8 +440,8 @@ func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig, noDefaultPort } // Encode scrape query parameters as labels. for k, v := range cfg.Params { - if len(v) > 0 { - lb.Set(model.ParamLabelPrefix+k, v[0]) + if name := model.ParamLabelPrefix + k; len(v) > 0 && lb.Get(name) == "" { + lb.Set(name, v[0]) } } @@ -457,51 +456,7 @@ func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig, noDefaultPort return labels.EmptyLabels(), labels.EmptyLabels(), errors.New("no address") } - // addPort checks whether we should add a default port to the address. 
- // If the address is not valid, we don't append a port either. - addPort := func(s string) (string, string, bool) { - // If we can split, a port exists and we don't have to add one. - if host, port, err := net.SplitHostPort(s); err == nil { - return host, port, false - } - // If adding a port makes it valid, the previous error - // was not due to an invalid address and we can append a port. - _, _, err := net.SplitHostPort(s + ":1234") - return "", "", err == nil - } - addr := lb.Get(model.AddressLabel) - scheme := lb.Get(model.SchemeLabel) - host, port, add := addPort(addr) - // If it's an address with no trailing port, infer it based on the used scheme - // unless the no-default-scrape-port feature flag is present. - if !noDefaultPort && add { - // Addresses reaching this point are already wrapped in [] if necessary. - switch scheme { - case "http", "": - addr += ":80" - case "https": - addr += ":443" - default: - return labels.EmptyLabels(), labels.EmptyLabels(), fmt.Errorf("invalid scheme: %q", cfg.Scheme) - } - lb.Set(model.AddressLabel, addr) - } - - if noDefaultPort { - // If it's an address with a trailing default port and the - // no-default-scrape-port flag is present, remove the port. - switch port { - case "80": - if scheme == "http" { - lb.Set(model.AddressLabel, host) - } - case "443": - if scheme == "https" { - lb.Set(model.AddressLabel, host) - } - } - } if err := config.CheckTargetAddress(model.LabelValue(addr)); err != nil { return labels.EmptyLabels(), labels.EmptyLabels(), err @@ -557,7 +512,7 @@ func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig, noDefaultPort } // TargetsFromGroup builds targets based on the given TargetGroup and config. -func TargetsFromGroup(tg *targetgroup.Group, cfg *config.ScrapeConfig, noDefaultPort bool, targets []*Target, lb *labels.Builder) ([]*Target, []error) { +func TargetsFromGroup(tg *targetgroup.Group, cfg *config.ScrapeConfig, targets []*Target, lb *labels.Builder) ([]*Target, []error) { targets = targets[:0] failures := []error{} @@ -573,7 +528,7 @@ func TargetsFromGroup(tg *targetgroup.Group, cfg *config.ScrapeConfig, noDefault } } - lset, origLabels, err := PopulateLabels(lb, cfg, noDefaultPort) + lset, origLabels, err := PopulateLabels(lb, cfg) if err != nil { failures = append(failures, fmt.Errorf("instance %d in group %s: %w", i, tg, err)) } diff --git a/vendor/github.com/prometheus/prometheus/storage/buffer.go b/vendor/github.com/prometheus/prometheus/storage/buffer.go index 4c9b004ab66..e847c10e61a 100644 --- a/vendor/github.com/prometheus/prometheus/storage/buffer.go +++ b/vendor/github.com/prometheus/prometheus/storage/buffer.go @@ -253,9 +253,9 @@ type sampleRing struct { delta int64 // Lookback buffers. We use iBuf for mixed samples, but one of the three - // concrete ones for homogenous samples. (Only one of the four bufs is + // concrete ones for homogeneous samples. (Only one of the four bufs is // allowed to be populated!) This avoids the overhead of the interface - // wrapper for the happy (and by far most common) case of homogenous + // wrapper for the happy (and by far most common) case of homogeneous // samples. iBuf []chunks.Sample fBuf []fSample @@ -280,7 +280,7 @@ const ( fhBuf ) -// newSampleRing creates a new sampleRing. If you do not know the prefereed +// newSampleRing creates a new sampleRing. If you do not know the preferred // value type yet, use a size of 0 (in which case the provided typ doesn't // matter). 
On the first add, a buffer of size 16 will be allocated with the // preferred type being the type of the first added sample. @@ -626,7 +626,7 @@ func addF(s fSample, buf []fSample, r *sampleRing) []fSample { return buf } -// addF adds an hSample to a (specialized) hSample buffer. +// addH adds an hSample to a (specialized) hSample buffer. func addH(s hSample, buf []hSample, r *sampleRing) []hSample { l := len(buf) // Grow the ring buffer if it fits no more elements. diff --git a/vendor/github.com/prometheus/prometheus/storage/fanout.go b/vendor/github.com/prometheus/prometheus/storage/fanout.go index e52342bc7ed..4d076788a7c 100644 --- a/vendor/github.com/prometheus/prometheus/storage/fanout.go +++ b/vendor/github.com/prometheus/prometheus/storage/fanout.go @@ -15,9 +15,8 @@ package storage import ( "context" + "log/slog" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/exemplar" @@ -28,7 +27,7 @@ import ( ) type fanout struct { - logger log.Logger + logger *slog.Logger primary Storage secondaries []Storage @@ -43,7 +42,7 @@ type fanout struct { // and the error from the secondary querier will be returned as a warning. // // NOTE: In the case of Prometheus, it treats all remote storages as secondary / best effort. -func NewFanout(logger log.Logger, primary Storage, secondaries ...Storage) Storage { +func NewFanout(logger *slog.Logger, primary Storage, secondaries ...Storage) Storage { return &fanout{ logger: logger, primary: primary, @@ -142,12 +141,22 @@ func (f *fanout) Close() error { // fanoutAppender implements Appender. type fanoutAppender struct { - logger log.Logger + logger *slog.Logger primary Appender secondaries []Appender } +// SetOptions propagates the hints to both primary and secondary appenders. 
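Before the fanout implementation below, a brief hedged sketch of how the two appender additions threaded through this diff are meant to be used: SetOptions lets a writer discard out-of-order samples for specific appends (the rules group and the scrape loop do this when emitting stale markers), and AppendHistogramCTZeroSample mirrors AppendCTZeroSample for native histograms and has to be called before the corresponding AppendHistogram. The helper name and values are hypothetical:

package example

import (
	"errors"

	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/storage"
)

// appendWithCT illustrates the call order: the synthetic zero sample at the
// created timestamp goes in first, then the real histogram sample; an
// out-of-order CT is tolerated, as in the scrape loop.
func appendWithCT(app storage.Appender, lset labels.Labels, t, ct int64, h *histogram.Histogram) error {
	// Discard out-of-order samples for this appender, as the rules group and
	// the scrape loop do when writing stale markers.
	app.SetOptions(&storage.AppendOptions{DiscardOutOfOrder: true})
	defer app.SetOptions(nil)

	ref, err := app.AppendHistogramCTZeroSample(0, lset, t, ct, h, nil)
	if err != nil && !errors.Is(err, storage.ErrOutOfOrderCT) {
		return err
	}
	_, err = app.AppendHistogram(ref, lset, t, h, nil)
	return err
}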
+func (f *fanoutAppender) SetOptions(opts *AppendOptions) { + if f.primary != nil { + f.primary.SetOptions(opts) + } + for _, appender := range f.secondaries { + appender.SetOptions(opts) + } +} + func (f *fanoutAppender) Append(ref SeriesRef, l labels.Labels, t int64, v float64) (SeriesRef, error) { ref, err := f.primary.Append(ref, l, t, v) if err != nil { @@ -190,6 +199,20 @@ func (f *fanoutAppender) AppendHistogram(ref SeriesRef, l labels.Labels, t int64 return ref, nil } +func (f *fanoutAppender) AppendHistogramCTZeroSample(ref SeriesRef, l labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (SeriesRef, error) { + ref, err := f.primary.AppendHistogramCTZeroSample(ref, l, t, ct, h, fh) + if err != nil { + return ref, err + } + + for _, appender := range f.secondaries { + if _, err := appender.AppendHistogramCTZeroSample(ref, l, t, ct, h, fh); err != nil { + return 0, err + } + } + return ref, nil +} + func (f *fanoutAppender) UpdateMetadata(ref SeriesRef, l labels.Labels, m metadata.Metadata) (SeriesRef, error) { ref, err := f.primary.UpdateMetadata(ref, l, m) if err != nil { @@ -226,7 +249,7 @@ func (f *fanoutAppender) Commit() (err error) { err = appender.Commit() } else { if rollbackErr := appender.Rollback(); rollbackErr != nil { - level.Error(f.logger).Log("msg", "Squashed rollback error on commit", "err", rollbackErr) + f.logger.Error("Squashed rollback error on commit", "err", rollbackErr) } } } @@ -242,7 +265,7 @@ func (f *fanoutAppender) Rollback() (err error) { case err == nil: err = rollbackErr case rollbackErr != nil: - level.Error(f.logger).Log("msg", "Squashed rollback error on rollback", "err", rollbackErr) + f.logger.Error("Squashed rollback error on rollback", "err", rollbackErr) } } return nil diff --git a/vendor/github.com/prometheus/prometheus/storage/interface.go b/vendor/github.com/prometheus/prometheus/storage/interface.go index 8035caaa6d6..0634acb092d 100644 --- a/vendor/github.com/prometheus/prometheus/storage/interface.go +++ b/vendor/github.com/prometheus/prometheus/storage/interface.go @@ -50,7 +50,8 @@ var ( // NOTE(bwplotka): This can be both an instrumentation failure or commonly expected // behaviour, and we currently don't have a way to determine this. As a result // it's recommended to ignore this error for now. - ErrOutOfOrderCT = fmt.Errorf("created timestamp out of order, ignoring") + ErrOutOfOrderCT = fmt.Errorf("created timestamp out of order, ignoring") + ErrCTNewerThanSample = fmt.Errorf("CT is newer or the same as sample's timestamp, ignoring") ) // SeriesRef is a generic series reference. In prometheus it is either a @@ -113,6 +114,8 @@ type Querier interface { LabelQuerier // Select returns a set of series that matches the given label matchers. + // Results are not checked whether they match. Results that do not match + // may cause undefined behavior. // Caller can specify if it requires returned series to be sorted. Prefer not requiring sorting for better performance. // It allows passing hints that can help in optimising select, but it's up to implementation how this is used if used at all. Select(ctx context.Context, sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) SeriesSet @@ -151,6 +154,8 @@ type ChunkQuerier interface { LabelQuerier // Select returns a set of series that matches the given label matchers. + // Results are not checked whether they match. Results that do not match + // may cause undefined behavior. // Caller can specify if it requires returned series to be sorted. 
Prefer not requiring sorting for better performance. // It allows passing hints that can help in optimising select, but it's up to implementation how this is used if used at all. Select(ctx context.Context, sortSeries bool, hints *SelectHints, matchers ...*labels.Matcher) ChunkSeriesSet @@ -158,7 +163,7 @@ type ChunkQuerier interface { // LabelQuerier provides querying access over labels. type LabelQuerier interface { - // LabelValues returns all potential values for a label name. + // LabelValues returns all potential values for a label name in sorted order. // It is not safe to use the strings beyond the lifetime of the querier. // If matchers are specified the returned result set is reduced // to label values of metrics matching the matchers. @@ -238,6 +243,10 @@ func (f QueryableFunc) Querier(mint, maxt int64) (Querier, error) { return f(mint, maxt) } +type AppendOptions struct { + DiscardOutOfOrder bool +} + // Appender provides batched appends against a storage. // It must be completed with a call to Commit or Rollback and must not be reused afterwards. // @@ -266,6 +275,10 @@ type Appender interface { // Appender has to be discarded after rollback. Rollback() error + // SetOptions configures the appender with specific append options such as + // discarding out-of-order samples even if out-of-order is enabled in the TSDB. + SetOptions(opts *AppendOptions) + ExemplarAppender HistogramAppender MetadataUpdater @@ -313,6 +326,20 @@ type HistogramAppender interface { // pointer. AppendHistogram won't mutate the histogram, but in turn // depends on the caller to not mutate it either. AppendHistogram(ref SeriesRef, l labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (SeriesRef, error) + // AppendHistogramCTZeroSample adds synthetic zero sample for the given ct timestamp, + // which will be associated with given series, labels and the incoming + // sample's t (timestamp). AppendHistogramCTZeroSample returns error if zero sample can't be + // appended, for example when ct is too old, or when it would collide with + // incoming sample (sample has priority). + // + // AppendHistogramCTZeroSample has to be called before the corresponding histogram AppendHistogram. + // A series reference number is returned which can be used to modify the + // CT for the given series in the same or later transactions. + // Returned reference numbers are ephemeral and may be rejected in calls + // to AppendHistogramCTZeroSample() at any point. + // + // If the reference is 0 it must not be used for caching. + AppendHistogramCTZeroSample(ref SeriesRef, l labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (SeriesRef, error) } // MetadataUpdater provides an interface for associating metadata to stored series. diff --git a/vendor/github.com/prometheus/prometheus/storage/merge.go b/vendor/github.com/prometheus/prometheus/storage/merge.go index 318f22696ee..3144a0f648e 100644 --- a/vendor/github.com/prometheus/prometheus/storage/merge.go +++ b/vendor/github.com/prometheus/prometheus/storage/merge.go @@ -153,13 +153,18 @@ func (q *mergeGenericQuerier) Select(ctx context.Context, sortSeries bool, hints ) // Schedule all Selects for all queriers we know about. for _, querier := range q.queriers { + // copy the matchers as some queriers may alter the slice. 
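Reviewer note on the storage interface additions above: `Appender` now carries `SetOptions(*AppendOptions)` and `HistogramAppender` gains `AppendHistogramCTZeroSample`. A minimal sketch of how a caller might drive the new surface; the function, label set and timestamps are hypothetical, and only the types, method names and error variables come from the vendored interface:

```go
package example

import (
	"errors"

	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/storage"
)

// appendWithCreatedTimestamp is a sketch only: it asks the appender to discard
// out-of-order samples for this batch, emits the synthetic created-timestamp
// zero sample first (as the interface comment requires), then appends the real
// histogram sample and commits.
func appendWithCreatedTimestamp(app storage.Appender, lset labels.Labels, ct, ts int64, h *histogram.Histogram) error {
	app.SetOptions(&storage.AppendOptions{DiscardOutOfOrder: true})

	// The CT zero sample must precede the corresponding AppendHistogram call.
	// ErrOutOfOrderCT / ErrCTNewerThanSample are commonly expected and ignored.
	ref, err := app.AppendHistogramCTZeroSample(0, lset, ts, ct, h, nil)
	if err != nil && !errors.Is(err, storage.ErrOutOfOrderCT) && !errors.Is(err, storage.ErrCTNewerThanSample) {
		return err
	}

	if _, err := app.AppendHistogram(ref, lset, ts, h, nil); err != nil {
		return err
	}
	return app.Commit()
}
```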
+ // See https://github.com/prometheus/prometheus/issues/14723 + matchersCopy := make([]*labels.Matcher, len(matchers)) + copy(matchersCopy, matchers) + wg.Add(1) - go func(qr genericQuerier) { + go func(qr genericQuerier, m []*labels.Matcher) { defer wg.Done() // We need to sort for NewMergeSeriesSet to work. - seriesSetChan <- qr.Select(ctx, true, hints, matchers...) - }(querier) + seriesSetChan <- qr.Select(ctx, true, hints, m...) + }(querier, matchersCopy) } go func() { wg.Wait() diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/client.go b/vendor/github.com/prometheus/prometheus/storage/remote/client.go index 62218cfba91..23775122e56 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/client.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/client.go @@ -20,6 +20,7 @@ import ( "fmt" "io" "net/http" + "net/http/httptrace" "strconv" "strings" "time" @@ -31,6 +32,7 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/common/sigv4" "github.com/prometheus/common/version" + "go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/trace" @@ -213,8 +215,11 @@ func NewWriteClient(name string, conf *ClientConfig) (WriteClient, error) { if conf.WriteProtoMsg != "" { writeProtoMsg = conf.WriteProtoMsg } - - httpClient.Transport = otelhttp.NewTransport(t) + httpClient.Transport = otelhttp.NewTransport( + t, + otelhttp.WithClientTrace(func(ctx context.Context) *httptrace.ClientTrace { + return otelhttptrace.NewClientTrace(ctx, otelhttptrace.WithoutSubSpans()) + })) return &Client{ remoteName: name, urlString: conf.URL.String(), diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/metadata_watcher.go b/vendor/github.com/prometheus/prometheus/storage/remote/metadata_watcher.go index fdcd668f565..9306dcb4c28 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/metadata_watcher.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/metadata_watcher.go @@ -16,11 +16,11 @@ package remote import ( "context" "errors" + "log/slog" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/scrape" ) @@ -44,7 +44,7 @@ func (noop *noopScrapeManager) Get() (*scrape.Manager, error) { // MetadataWatcher watches the Scrape Manager for a given WriteMetadataTo. type MetadataWatcher struct { name string - logger log.Logger + logger *slog.Logger managerGetter ReadyScrapeManager manager Watchable @@ -62,9 +62,9 @@ type MetadataWatcher struct { } // NewMetadataWatcher builds a new MetadataWatcher. -func NewMetadataWatcher(l log.Logger, mg ReadyScrapeManager, name string, w MetadataAppender, interval model.Duration, deadline time.Duration) *MetadataWatcher { +func NewMetadataWatcher(l *slog.Logger, mg ReadyScrapeManager, name string, w MetadataAppender, interval model.Duration, deadline time.Duration) *MetadataWatcher { if l == nil { - l = log.NewNopLogger() + l = promslog.NewNopLogger() } if mg == nil { @@ -87,7 +87,7 @@ func NewMetadataWatcher(l log.Logger, mg ReadyScrapeManager, name string, w Meta // Start the MetadataWatcher. 
func (mw *MetadataWatcher) Start() { - level.Info(mw.logger).Log("msg", "Starting scraped metadata watcher") + mw.logger.Info("Starting scraped metadata watcher") mw.hardShutdownCtx, mw.hardShutdownCancel = context.WithCancel(context.Background()) mw.softShutdownCtx, mw.softShutdownCancel = context.WithCancel(mw.hardShutdownCtx) go mw.loop() @@ -95,15 +95,15 @@ func (mw *MetadataWatcher) Start() { // Stop the MetadataWatcher. func (mw *MetadataWatcher) Stop() { - level.Info(mw.logger).Log("msg", "Stopping metadata watcher...") - defer level.Info(mw.logger).Log("msg", "Scraped metadata watcher stopped") + mw.logger.Info("Stopping metadata watcher...") + defer mw.logger.Info("Scraped metadata watcher stopped") mw.softShutdownCancel() select { case <-mw.done: return case <-time.After(mw.deadline): - level.Error(mw.logger).Log("msg", "Failed to flush metadata") + mw.logger.Error("Failed to flush metadata") } mw.hardShutdownCancel() diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/normalize_label.go b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/normalize_label.go index a112b9bbce2..c22c7613203 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/normalize_label.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/normalize_label.go @@ -49,7 +49,7 @@ func NormalizeLabel(label string) string { // Return '_' for anything non-alphanumeric. func sanitizeRune(r rune) rune { - if unicode.IsLetter(r) || unicode.IsDigit(r) { + if unicode.IsLower(r) || unicode.IsUpper(r) || unicode.IsDigit(r) { return r } return '_' diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/normalize_name.go b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/normalize_name.go index 0f472b80a09..36b647f510a 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/normalize_name.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/normalize_name.go @@ -237,11 +237,13 @@ func removeSuffix(tokens []string, suffix string) []string { // Clean up specified string so it's Prometheus compliant func CleanUpString(s string) string { - return strings.Join(strings.FieldsFunc(s, func(r rune) bool { return !unicode.IsLetter(r) && !unicode.IsDigit(r) }), "_") + return strings.Join(strings.FieldsFunc(s, func(r rune) bool { return !unicode.IsUpper(r) && !unicode.IsLower(r) && !unicode.IsDigit(r) }), "_") } func RemovePromForbiddenRunes(s string) string { - return strings.Join(strings.FieldsFunc(s, func(r rune) bool { return !unicode.IsLetter(r) && !unicode.IsDigit(r) && r != '_' && r != ':' }), "_") + return strings.Join(strings.FieldsFunc(s, func(r rune) bool { + return !unicode.IsUpper(r) && !unicode.IsLower(r) && !unicode.IsDigit(r) && r != '_' && r != ':' + }), "_") } // Retrieve the Prometheus "basic" unit corresponding to the specified "basic" unit diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/helper.go b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/helper.go index 84cd309d6b9..08d72c52f44 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/helper.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/helper.go @@ -21,6 +21,7 @@ import 
( "encoding/hex" "fmt" "log" + "log/slog" "math" "slices" "sort" @@ -28,8 +29,6 @@ import ( "unicode/utf8" "github.com/cespare/xxhash/v2" - gokitlog "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/common/model" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" @@ -245,7 +244,7 @@ func isValidAggregationTemporality(metric pmetric.Metric) bool { // However, work is under way to resolve this shortcoming through a feature called native histograms custom buckets: // https://github.com/prometheus/prometheus/issues/13485. func (c *PrometheusConverter) addHistogramDataPoints(ctx context.Context, dataPoints pmetric.HistogramDataPointSlice, - resource pcommon.Resource, settings Settings, baseName string, logger gokitlog.Logger) error { + resource pcommon.Resource, settings Settings, baseName string, logger *slog.Logger) error { for x := 0; x < dataPoints.Len(); x++ { if err := c.everyN.checkContext(ctx); err != nil { return err @@ -337,7 +336,7 @@ func (c *PrometheusConverter) addHistogramDataPoints(ctx context.Context, dataPo labels := createLabels(baseName+createdSuffix, baseLabels) c.addTimeSeriesIfNeeded(labels, startTimestampMs, pt.Timestamp()) } - level.Debug(logger).Log("labels", labelsStringer(createLabels(baseName, baseLabels)), "start_ts", startTimestampMs, "sample_ts", timestamp, "type", "histogram") + logger.Debug("addHistogramDataPoints", "labels", labelsStringer(createLabels(baseName, baseLabels)), "start_ts", startTimestampMs, "sample_ts", timestamp, "type", "histogram") } return nil @@ -359,9 +358,17 @@ func getPromExemplars[T exemplarType](ctx context.Context, everyN *everyNTimes, exemplarRunes := 0 promExemplar := prompb.Exemplar{ - Value: exemplar.DoubleValue(), Timestamp: timestamp.FromTime(exemplar.Timestamp().AsTime()), } + switch exemplar.ValueType() { + case pmetric.ExemplarValueTypeInt: + promExemplar.Value = float64(exemplar.IntValue()) + case pmetric.ExemplarValueTypeDouble: + promExemplar.Value = exemplar.DoubleValue() + default: + return nil, fmt.Errorf("unsupported exemplar value type: %v", exemplar.ValueType()) + } + if traceID := exemplar.TraceID(); !traceID.IsEmpty() { val := hex.EncodeToString(traceID[:]) exemplarRunes += utf8.RuneCountInString(traceIDKey) + utf8.RuneCountInString(val) @@ -443,7 +450,7 @@ func mostRecentTimestampInMetric(metric pmetric.Metric) pcommon.Timestamp { } func (c *PrometheusConverter) addSummaryDataPoints(ctx context.Context, dataPoints pmetric.SummaryDataPointSlice, resource pcommon.Resource, - settings Settings, baseName string, logger gokitlog.Logger) error { + settings Settings, baseName string, logger *slog.Logger) error { for x := 0; x < dataPoints.Len(); x++ { if err := c.everyN.checkContext(ctx); err != nil { return err @@ -500,7 +507,7 @@ func (c *PrometheusConverter) addSummaryDataPoints(ctx context.Context, dataPoin c.addTimeSeriesIfNeeded(createdLabels, startTimestampMs, pt.Timestamp()) } - level.Debug(logger).Log("labels", labelsStringer(createLabels(baseName, baseLabels)), "start_ts", startTimestampMs, "sample_ts", timestamp, "type", "summary") + logger.Debug("addSummaryDataPoints", "labels", labelsStringer(createLabels(baseName, baseLabels)), "start_ts", startTimestampMs, "sample_ts", timestamp, "type", "summary") } return nil @@ -584,7 +591,7 @@ const defaultIntervalForStartTimestamps = int64(300_000) // make use of its direct support fort Created Timestamps instead. 
// See https://opentelemetry.io/docs/specs/otel/metrics/data-model/#resets-and-gaps to know more about how OTel handles // resets for cumulative metrics. -func (c *PrometheusConverter) handleStartTime(startTs, ts int64, labels []prompb.Label, settings Settings, typ string, value float64, logger gokitlog.Logger) { +func (c *PrometheusConverter) handleStartTime(startTs, ts int64, labels []prompb.Label, settings Settings, typ string, value float64, logger *slog.Logger) { if !settings.EnableCreatedTimestampZeroIngestion { return } @@ -606,7 +613,7 @@ func (c *PrometheusConverter) handleStartTime(startTs, ts int64, labels []prompb return } - level.Debug(logger).Log("msg", "adding zero value at start_ts", "type", typ, "labels", labelsStringer(labels), "start_ts", startTs, "sample_ts", ts, "sample_value", value) + logger.Debug("adding zero value at start_ts", "type", typ, "labels", labelsStringer(labels), "start_ts", startTs, "sample_ts", ts, "sample_value", value) // See https://github.com/prometheus/prometheus/issues/14600 for context. c.addSample(&prompb.Sample{Timestamp: startTs}, labels) @@ -682,10 +689,10 @@ func addResourceTargetInfo(resource pcommon.Resource, settings Settings, timesta return } - ts := convertTimeStamp(timestamp) sample := &prompb.Sample{ - Value: float64(1), - Timestamp: ts, + Value: float64(1), + // convert ns to ms + Timestamp: convertTimeStamp(timestamp), } converter.addSample(sample, labels) } diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/histograms.go b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/histograms.go index f12ba26f0f2..8349d4f9070 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/histograms.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/histograms.go @@ -60,7 +60,6 @@ func (c *PrometheusConverter) addExponentialHistogramDataPoints(ctx context.Cont promName, ) ts, _ := c.getOrCreateTimeSeries(lbls) - ts.Histograms = append(ts.Histograms, histogram) exemplars, err := getPromExemplars[pmetric.ExponentialHistogramDataPoint](ctx, &c.everyN, pt) @@ -73,8 +72,8 @@ func (c *PrometheusConverter) addExponentialHistogramDataPoints(ctx context.Cont return annots, nil } -// exponentialToNativeHistogram translates OTel Exponential Histogram data point -// to Prometheus Native Histogram. +// exponentialToNativeHistogram translates an OTel Exponential Histogram data point +// to a Prometheus Native Histogram. 
func exponentialToNativeHistogram(p pmetric.ExponentialHistogramDataPoint) (prompb.Histogram, annotations.Annotations, error) { var annots annotations.Annotations scale := p.Scale() diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go index a74caabc581..a7f41a63164 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go @@ -20,11 +20,11 @@ import ( "context" "errors" "fmt" + "log/slog" "sort" "strings" "time" - "github.com/go-kit/log" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" "go.uber.org/multierr" @@ -67,7 +67,7 @@ func NewPrometheusConverter() *PrometheusConverter { } // FromMetrics converts pmetric.Metrics to Prometheus remote write format. -func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metrics, settings Settings, logger log.Logger) (annots annotations.Annotations, errs error) { +func (c *PrometheusConverter) FromMetrics(ctx context.Context, md pmetric.Metrics, settings Settings, logger *slog.Logger) (annots annotations.Annotations, errs error) { c.everyN = everyNTimes{n: 128} resourceMetricsSlice := md.ResourceMetrics() for i := 0; i < resourceMetricsSlice.Len(); i++ { diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go index af0c7a478b7..30385f8fd2c 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go @@ -18,10 +18,9 @@ package prometheusremotewrite import ( "context" + "log/slog" "math" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/common/model" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/pmetric" @@ -47,9 +46,9 @@ func (c *PrometheusConverter) addGaugeNumberDataPoints(ctx context.Context, data model.MetricNameLabel, name, ) - timestamp := convertTimeStamp(pt.Timestamp()) sample := &prompb.Sample{ - Timestamp: timestamp, + // convert ns to ms + Timestamp: convertTimeStamp(pt.Timestamp()), } switch pt.ValueType() { case pmetric.NumberDataPointValueTypeInt: @@ -67,7 +66,7 @@ func (c *PrometheusConverter) addGaugeNumberDataPoints(ctx context.Context, data } func (c *PrometheusConverter) addSumNumberDataPoints(ctx context.Context, dataPoints pmetric.NumberDataPointSlice, - resource pcommon.Resource, metric pmetric.Metric, settings Settings, name string, logger log.Logger) error { + resource pcommon.Resource, metric pmetric.Metric, settings Settings, name string, logger *slog.Logger) error { for x := 0; x < dataPoints.Len(); x++ { if err := c.everyN.checkContext(ctx); err != nil { return err @@ -85,8 +84,8 @@ func (c *PrometheusConverter) addSumNumberDataPoints(ctx context.Context, dataPo model.MetricNameLabel, name, ) - sample := &prompb.Sample{ + // convert ns to ms Timestamp: timestamp, } switch pt.ValueType() { @@ -127,7 +126,7 @@ func (c *PrometheusConverter) addSumNumberDataPoints(ctx 
context.Context, dataPo } c.addTimeSeriesIfNeeded(createdLabels, startTimestampMs, pt.Timestamp()) } - level.Debug(logger).Log("labels", labelsStringer(lbls), "start_ts", startTimestampMs, "sample_ts", timestamp, "type", "sum") + logger.Debug("addSumNumberDataPoints", "labels", labelsStringer(lbls), "start_ts", startTimestampMs, "sample_ts", timestamp, "type", "sum") } return nil diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go b/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go index b1c8997268b..9f27c333a6d 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go @@ -17,17 +17,17 @@ import ( "context" "errors" "fmt" + "log/slog" "math" "strconv" "sync" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/gogo/protobuf/proto" "github.com/golang/snappy" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" semconv "go.opentelemetry.io/otel/semconv/v1.21.0" @@ -407,7 +407,7 @@ type QueueManager struct { reshardDisableStartTimestamp atomic.Int64 // Time that reshard was disabled. reshardDisableEndTimestamp atomic.Int64 // Time that reshard is disabled until. - logger log.Logger + logger *slog.Logger flushDeadline time.Duration cfg config.QueueConfig mcfg config.MetadataConfig @@ -454,7 +454,7 @@ func NewQueueManager( metrics *queueManagerMetrics, watcherMetrics *wlog.WatcherMetrics, readerMetrics *wlog.LiveReaderMetrics, - logger log.Logger, + logger *slog.Logger, dir string, samplesIn *ewmaRate, cfg config.QueueConfig, @@ -471,7 +471,7 @@ func NewQueueManager( protoMsg config.RemoteWriteProtoMsg, ) *QueueManager { if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } // Copy externalLabels into a slice, which we need for processExternalLabels. @@ -480,7 +480,7 @@ func NewQueueManager( extLabelsSlice = append(extLabelsSlice, l) }) - logger = log.With(logger, remoteName, client.Name(), endpoint, client.Endpoint()) + logger = logger.With(remoteName, client.Name(), endpoint, client.Endpoint()) t := &QueueManager{ logger: logger, flushDeadline: flushDeadline, @@ -526,7 +526,7 @@ func NewQueueManager( // ships them alongside series. If both mechanisms are set, the new one // takes precedence by implicitly disabling the older one. 
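Most of the churn in the remote-write files that follow is the mechanical migration from github.com/go-kit/log to the standard library's log/slog, with github.com/prometheus/common/promslog supplying the nop fallback. A small sketch of the pattern, with hypothetical key names:

```go
package example

import (
	"log/slog"

	"github.com/prometheus/common/promslog"
)

// newComponentLogger shows the slog-based pattern the vendored code now uses:
// a nil logger is replaced with a nop logger, and static key/value context is
// attached with With instead of log.With.
func newComponentLogger(logger *slog.Logger, name, endpoint string) *slog.Logger {
	if logger == nil {
		logger = promslog.NewNopLogger()
	}
	return logger.With("remote_name", name, "url", endpoint)
}

func logExample(logger *slog.Logger, err error) {
	// Old style: level.Warn(logger).Log("msg", "Failed to send batch, retrying", "err", err)
	// New style: the level is a method and the message is the first argument.
	logger.Warn("Failed to send batch, retrying", "err", err)
}
```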
if t.mcfg.Send && t.protoMsg != config.RemoteWriteProtoMsgV1 { - level.Warn(logger).Log("msg", "usage of 'metadata_config.send' is redundant when using remote write v2 (or higher) as metadata will always be gathered from the WAL and included for every series within each write request") + logger.Warn("usage of 'metadata_config.send' is redundant when using remote write v2 (or higher) as metadata will always be gathered from the WAL and included for every series within each write request") t.mcfg.Send = false } @@ -567,7 +567,7 @@ func (t *QueueManager) AppendWatcherMetadata(ctx context.Context, metadata []scr err := t.sendMetadataWithBackoff(ctx, mm[i*t.mcfg.MaxSamplesPerSend:last], pBuf) if err != nil { t.metrics.failedMetadataTotal.Add(float64(last - (i * t.mcfg.MaxSamplesPerSend))) - level.Error(t.logger).Log("msg", "non-recoverable error while sending metadata", "count", last-(i*t.mcfg.MaxSamplesPerSend), "err", err) + t.logger.Error("non-recoverable error while sending metadata", "count", last-(i*t.mcfg.MaxSamplesPerSend), "err", err) } } } @@ -706,7 +706,7 @@ outer: if !ok { t.dataDropped.incr(1) if _, ok := t.droppedSeries[s.Ref]; !ok { - level.Info(t.logger).Log("msg", "Dropped sample for series that was not explicitly dropped via relabelling", "ref", s.Ref) + t.logger.Info("Dropped sample for series that was not explicitly dropped via relabelling", "ref", s.Ref) t.metrics.droppedSamplesTotal.WithLabelValues(reasonUnintentionalDroppedSeries).Inc() } else { t.metrics.droppedSamplesTotal.WithLabelValues(reasonDroppedSeries).Inc() @@ -769,7 +769,7 @@ outer: // Track dropped exemplars in the same EWMA for sharding calc. t.dataDropped.incr(1) if _, ok := t.droppedSeries[e.Ref]; !ok { - level.Info(t.logger).Log("msg", "Dropped exemplar for series that was not explicitly dropped via relabelling", "ref", e.Ref) + t.logger.Info("Dropped exemplar for series that was not explicitly dropped via relabelling", "ref", e.Ref) t.metrics.droppedExemplarsTotal.WithLabelValues(reasonUnintentionalDroppedSeries).Inc() } else { t.metrics.droppedExemplarsTotal.WithLabelValues(reasonDroppedSeries).Inc() @@ -825,7 +825,7 @@ outer: if !ok { t.dataDropped.incr(1) if _, ok := t.droppedSeries[h.Ref]; !ok { - level.Info(t.logger).Log("msg", "Dropped histogram for series that was not explicitly dropped via relabelling", "ref", h.Ref) + t.logger.Info("Dropped histogram for series that was not explicitly dropped via relabelling", "ref", h.Ref) t.metrics.droppedHistogramsTotal.WithLabelValues(reasonUnintentionalDroppedSeries).Inc() } else { t.metrics.droppedHistogramsTotal.WithLabelValues(reasonDroppedSeries).Inc() @@ -880,7 +880,7 @@ outer: if !ok { t.dataDropped.incr(1) if _, ok := t.droppedSeries[h.Ref]; !ok { - level.Info(t.logger).Log("msg", "Dropped histogram for series that was not explicitly dropped via relabelling", "ref", h.Ref) + t.logger.Info("Dropped histogram for series that was not explicitly dropped via relabelling", "ref", h.Ref) t.metrics.droppedHistogramsTotal.WithLabelValues(reasonUnintentionalDroppedSeries).Inc() } else { t.metrics.droppedHistogramsTotal.WithLabelValues(reasonDroppedSeries).Inc() @@ -944,8 +944,8 @@ func (t *QueueManager) Start() { // Stop stops sending samples to the remote storage and waits for pending // sends to complete. 
func (t *QueueManager) Stop() { - level.Info(t.logger).Log("msg", "Stopping remote storage...") - defer level.Info(t.logger).Log("msg", "Remote storage stopped.") + t.logger.Info("Stopping remote storage...") + defer t.logger.Info("Remote storage stopped.") close(t.quit) t.wg.Wait() @@ -1093,10 +1093,10 @@ func (t *QueueManager) updateShardsLoop() { // to stay close to shardUpdateDuration. select { case t.reshardChan <- desiredShards: - level.Info(t.logger).Log("msg", "Remote storage resharding", "from", t.numShards, "to", desiredShards) + t.logger.Info("Remote storage resharding", "from", t.numShards, "to", desiredShards) t.numShards = desiredShards default: - level.Info(t.logger).Log("msg", "Currently resharding, skipping.") + t.logger.Info("Currently resharding, skipping.") } case <-t.quit: return @@ -1114,14 +1114,14 @@ func (t *QueueManager) shouldReshard(desiredShards int) bool { minSendTimestamp := time.Now().Add(-1 * shardUpdateDuration).Unix() lsts := t.lastSendTimestamp.Load() if lsts < minSendTimestamp { - level.Warn(t.logger).Log("msg", "Skipping resharding, last successful send was beyond threshold", "lastSendTimestamp", lsts, "minSendTimestamp", minSendTimestamp) + t.logger.Warn("Skipping resharding, last successful send was beyond threshold", "lastSendTimestamp", lsts, "minSendTimestamp", minSendTimestamp) return false } if disableTimestamp := t.reshardDisableEndTimestamp.Load(); time.Now().Unix() < disableTimestamp { disabledAt := time.Unix(t.reshardDisableStartTimestamp.Load(), 0) disabledFor := time.Until(time.Unix(disableTimestamp, 0)) - level.Warn(t.logger).Log("msg", "Skipping resharding, resharding is disabled while waiting for recoverable errors", "disabled_at", disabledAt, "disabled_for", disabledFor) + t.logger.Warn("Skipping resharding, resharding is disabled while waiting for recoverable errors", "disabled_at", disabledAt, "disabled_for", disabledFor) return false } return true @@ -1164,7 +1164,7 @@ func (t *QueueManager) calculateDesiredShards() int { desiredShards = timePerSample * (dataInRate*dataKeptRatio + backlogCatchup) ) t.metrics.desiredNumShards.Set(desiredShards) - level.Debug(t.logger).Log("msg", "QueueManager.calculateDesiredShards", + t.logger.Debug("QueueManager.calculateDesiredShards", "dataInRate", dataInRate, "dataOutRate", dataOutRate, "dataKeptRatio", dataKeptRatio, @@ -1182,7 +1182,7 @@ func (t *QueueManager) calculateDesiredShards() int { lowerBound = float64(t.numShards) * (1. - shardToleranceFraction) upperBound = float64(t.numShards) * (1. + shardToleranceFraction) ) - level.Debug(t.logger).Log("msg", "QueueManager.updateShardsLoop", + t.logger.Debug("QueueManager.updateShardsLoop", "lowerBound", lowerBound, "desiredShards", desiredShards, "upperBound", upperBound) desiredShards = math.Ceil(desiredShards) // Round up to be on the safe side. @@ -1193,7 +1193,7 @@ func (t *QueueManager) calculateDesiredShards() int { numShards := int(desiredShards) // Do not downshard if we are more than ten seconds back. if numShards < t.numShards && delay > 10.0 { - level.Debug(t.logger).Log("msg", "Not downsharding due to being too far behind") + t.logger.Debug("Not downsharding due to being too far behind") return t.numShards } @@ -1321,7 +1321,7 @@ func (s *shards) stop() { // Log error for any dropped samples, exemplars, or histograms. 
logDroppedError := func(t string, counter atomic.Uint32) { if dropped := counter.Load(); dropped > 0 { - level.Error(s.qm.logger).Log("msg", fmt.Sprintf("Failed to flush all %s on shutdown", t), "count", dropped) + s.qm.logger.Error(fmt.Sprintf("Failed to flush all %s on shutdown", t), "count", dropped) } } logDroppedError("samples", s.samplesDroppedOnHardShutdown) @@ -1564,7 +1564,7 @@ func (s *shards) runShard(ctx context.Context, shardID int, queue *queue) { nPendingSamples, nPendingExemplars, nPendingHistograms := populateTimeSeries(batch, pendingData, s.qm.sendExemplars, s.qm.sendNativeHistograms) n := nPendingSamples + nPendingExemplars + nPendingHistograms if timer { - level.Debug(s.qm.logger).Log("msg", "runShard timer ticked, sending buffered data", "samples", nPendingSamples, + s.qm.logger.Debug("runShard timer ticked, sending buffered data", "samples", nPendingSamples, "exemplars", nPendingExemplars, "shard", shardNum, "histograms", nPendingHistograms) } _ = s.sendSamples(ctx, pendingData[:n], nPendingSamples, nPendingExemplars, nPendingHistograms, pBuf, &buf, enc) @@ -1691,9 +1691,9 @@ func (s *shards) updateMetrics(_ context.Context, err error, sampleCount, exempl s.qm.metrics.failedExemplarsTotal.Add(float64(exemplarDiff)) } if err != nil { - level.Error(s.qm.logger).Log("msg", "non-recoverable error", "failedSampleCount", sampleDiff, "failedHistogramCount", histogramDiff, "failedExemplarCount", exemplarDiff, "err", err) + s.qm.logger.Error("non-recoverable error", "failedSampleCount", sampleDiff, "failedHistogramCount", histogramDiff, "failedExemplarCount", exemplarDiff, "err", err) } else if sampleDiff+exemplarDiff+histogramDiff > 0 { - level.Error(s.qm.logger).Log("msg", "we got 2xx status code from the Receiver yet statistics indicate some dat was not written; investigation needed", "failedSampleCount", sampleDiff, "failedHistogramCount", histogramDiff, "failedExemplarCount", exemplarDiff) + s.qm.logger.Error("we got 2xx status code from the Receiver yet statistics indicate some dat was not written; investigation needed", "failedSampleCount", sampleDiff, "failedHistogramCount", histogramDiff, "failedExemplarCount", exemplarDiff) } // These counters are used to calculate the dynamic sharding, and as such @@ -2018,16 +2018,16 @@ func (t *QueueManager) sendWriteRequestWithBackoff(ctx context.Context, attempt switch { case backoffErr.retryAfter > 0: sleepDuration = backoffErr.retryAfter - level.Info(t.logger).Log("msg", "Retrying after duration specified by Retry-After header", "duration", sleepDuration) + t.logger.Info("Retrying after duration specified by Retry-After header", "duration", sleepDuration) case backoffErr.retryAfter < 0: - level.Debug(t.logger).Log("msg", "retry-after cannot be in past, retrying using default backoff mechanism") + t.logger.Debug("retry-after cannot be in past, retrying using default backoff mechanism") } // We should never reshard for a recoverable error; increasing shards could // make the problem worse, particularly if we're getting rate limited. // // reshardDisableTimestamp holds the unix timestamp until which resharding - // is diableld. We'll update that timestamp if the period we were just told + // is disabled. We'll update that timestamp if the period we were just told // to sleep for is newer than the existing disabled timestamp. 
reshardWaitPeriod := time.Now().Add(time.Duration(sleepDuration) * 2) if oldTS, updated := setAtomicToNewer(&t.reshardDisableEndTimestamp, reshardWaitPeriod.Unix()); updated { @@ -2047,7 +2047,7 @@ func (t *QueueManager) sendWriteRequestWithBackoff(ctx context.Context, attempt // If we make it this far, we've encountered a recoverable error and will retry. onRetry() - level.Warn(t.logger).Log("msg", "Failed to send batch, retrying", "err", err) + t.logger.Warn("Failed to send batch, retrying", "err", err) backoff = sleepDuration * 2 @@ -2147,12 +2147,12 @@ func compressPayload(tmpbuf *[]byte, inp []byte, enc Compression) (compressed [] } } -func buildWriteRequest(logger log.Logger, timeSeries []prompb.TimeSeries, metadata []prompb.MetricMetadata, pBuf *proto.Buffer, buf *[]byte, filter func(prompb.TimeSeries) bool, enc Compression) (compressed []byte, highest, lowest int64, _ error) { +func buildWriteRequest(logger *slog.Logger, timeSeries []prompb.TimeSeries, metadata []prompb.MetricMetadata, pBuf *proto.Buffer, buf *[]byte, filter func(prompb.TimeSeries) bool, enc Compression) (compressed []byte, highest, lowest int64, _ error) { highest, lowest, timeSeries, droppedSamples, droppedExemplars, droppedHistograms := buildTimeSeries(timeSeries, filter) if droppedSamples > 0 || droppedExemplars > 0 || droppedHistograms > 0 { - level.Debug(logger).Log("msg", "dropped data due to their age", "droppedSamples", droppedSamples, "droppedExemplars", droppedExemplars, "droppedHistograms", droppedHistograms) + logger.Debug("dropped data due to their age", "droppedSamples", droppedSamples, "droppedExemplars", droppedExemplars, "droppedHistograms", droppedHistograms) } req := &prompb.WriteRequest{ @@ -2185,11 +2185,11 @@ func buildWriteRequest(logger log.Logger, timeSeries []prompb.TimeSeries, metada return compressed, highest, lowest, nil } -func buildV2WriteRequest(logger log.Logger, samples []writev2.TimeSeries, labels []string, pBuf, buf *[]byte, filter func(writev2.TimeSeries) bool, enc Compression) (compressed []byte, highest, lowest int64, _ error) { +func buildV2WriteRequest(logger *slog.Logger, samples []writev2.TimeSeries, labels []string, pBuf, buf *[]byte, filter func(writev2.TimeSeries) bool, enc Compression) (compressed []byte, highest, lowest int64, _ error) { highest, lowest, timeSeries, droppedSamples, droppedExemplars, droppedHistograms := buildV2TimeSeries(samples, filter) if droppedSamples > 0 || droppedExemplars > 0 || droppedHistograms > 0 { - level.Debug(logger).Log("msg", "dropped data due to their age", "droppedSamples", droppedSamples, "droppedExemplars", droppedExemplars, "droppedHistograms", droppedHistograms) + logger.Debug("dropped data due to their age", "droppedSamples", droppedSamples, "droppedExemplars", droppedExemplars, "droppedHistograms", droppedHistograms) } req := &writev2.Request{ diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/read_handler.go b/vendor/github.com/prometheus/prometheus/storage/remote/read_handler.go index ffc64c9c3fb..8f2945f9740 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/read_handler.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/read_handler.go @@ -16,13 +16,12 @@ package remote import ( "context" "errors" + "log/slog" "net/http" "slices" "strings" "sync" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/prometheus/config" @@ -34,7 +33,7 @@ import ( ) type readHandler struct { - logger log.Logger + logger 
*slog.Logger queryable storage.SampleAndChunkQueryable config func() config.Config remoteReadSampleLimit int @@ -46,7 +45,7 @@ type readHandler struct { // NewReadHandler creates a http.Handler that accepts remote read requests and // writes them to the provided queryable. -func NewReadHandler(logger log.Logger, r prometheus.Registerer, queryable storage.SampleAndChunkQueryable, config func() config.Config, remoteReadSampleLimit, remoteReadConcurrencyLimit, remoteReadMaxBytesInFrame int) http.Handler { +func NewReadHandler(logger *slog.Logger, r prometheus.Registerer, queryable storage.SampleAndChunkQueryable, config func() config.Config, remoteReadSampleLimit, remoteReadConcurrencyLimit, remoteReadMaxBytesInFrame int) http.Handler { h := &readHandler{ logger: logger, queryable: queryable, @@ -140,7 +139,7 @@ func (h *readHandler) remoteReadSamples( } defer func() { if err := querier.Close(); err != nil { - level.Warn(h.logger).Log("msg", "Error on querier close", "err", err.Error()) + h.logger.Warn("Error on querier close", "err", err.Error()) } }() @@ -163,7 +162,7 @@ func (h *readHandler) remoteReadSamples( return err } for _, w := range ws { - level.Warn(h.logger).Log("msg", "Warnings on remote read query", "err", w.Error()) + h.logger.Warn("Warnings on remote read query", "err", w.Error()) } for _, ts := range resp.Results[i].Timeseries { ts.Labels = MergeLabels(ts.Labels, sortedExternalLabels) @@ -208,7 +207,7 @@ func (h *readHandler) remoteReadStreamedXORChunks(ctx context.Context, w http.Re } defer func() { if err := querier.Close(); err != nil { - level.Warn(h.logger).Log("msg", "Error on chunk querier close", "err", err.Error()) + h.logger.Warn("Error on chunk querier close", "err", err.Error()) } }() @@ -239,7 +238,7 @@ func (h *readHandler) remoteReadStreamedXORChunks(ctx context.Context, w http.Re } for _, w := range ws { - level.Warn(h.logger).Log("msg", "Warnings on chunked remote read query", "warnings", w.Error()) + h.logger.Warn("Warnings on chunked remote read query", "warnings", w.Error()) } return nil }(); err != nil { diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/storage.go b/vendor/github.com/prometheus/prometheus/storage/remote/storage.go index 05634f1798f..14c3c87d936 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/storage.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/storage.go @@ -18,12 +18,13 @@ import ( "crypto/md5" "encoding/hex" "fmt" + "log/slog" "sync" "time" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "gopkg.in/yaml.v2" "github.com/prometheus/prometheus/config" @@ -51,8 +52,9 @@ type startTimeCallback func() (int64, error) // Storage represents all the remote read and write endpoints. It implements // storage.Storage. type Storage struct { - logger *logging.Deduper - mtx sync.Mutex + deduper *logging.Deduper + logger *slog.Logger + mtx sync.Mutex rws *WriteStorage @@ -62,14 +64,16 @@ type Storage struct { } // NewStorage returns a remote.Storage. 
-func NewStorage(l log.Logger, reg prometheus.Registerer, stCallback startTimeCallback, walDir string, flushDeadline time.Duration, sm ReadyScrapeManager, metadataInWAL bool) *Storage { +func NewStorage(l *slog.Logger, reg prometheus.Registerer, stCallback startTimeCallback, walDir string, flushDeadline time.Duration, sm ReadyScrapeManager, metadataInWAL bool) *Storage { if l == nil { - l = log.NewNopLogger() + l = promslog.NewNopLogger() } - logger := logging.Dedupe(l, 1*time.Minute) + deduper := logging.Dedupe(l, 1*time.Minute) + logger := slog.New(deduper) s := &Storage{ logger: logger, + deduper: deduper, localStartTimeCallback: stCallback, } s.rws = NewWriteStorage(s.logger, reg, walDir, flushDeadline, sm, metadataInWAL) @@ -196,7 +200,7 @@ func (s *Storage) LowestSentTimestamp() int64 { // Close the background processing of the storage queues. func (s *Storage) Close() error { - s.logger.Stop() + s.deduper.Stop() s.mtx.Lock() defer s.mtx.Unlock() return s.rws.Close() diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/write.go b/vendor/github.com/prometheus/prometheus/storage/remote/write.go index 3d2f1fdfcdb..639f3445209 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/write.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/write.go @@ -17,13 +17,14 @@ import ( "context" "errors" "fmt" + "log/slog" "math" "sync" "time" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/exemplar" @@ -57,7 +58,7 @@ var ( // WriteStorage represents all the remote write storage. type WriteStorage struct { - logger log.Logger + logger *slog.Logger reg prometheus.Registerer mtx sync.Mutex @@ -78,9 +79,9 @@ type WriteStorage struct { } // NewWriteStorage creates and runs a WriteStorage. -func NewWriteStorage(logger log.Logger, reg prometheus.Registerer, dir string, flushDeadline time.Duration, sm ReadyScrapeManager, metadataInWal bool) *WriteStorage { +func NewWriteStorage(logger *slog.Logger, reg prometheus.Registerer, dir string, flushDeadline time.Duration, sm ReadyScrapeManager, metadataInWal bool) *WriteStorage { if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } rws := &WriteStorage{ queues: make(map[string]*QueueManager), @@ -277,6 +278,7 @@ func (rws *WriteStorage) Close() error { type timestampTracker struct { writeStorage *WriteStorage + appendOptions *storage.AppendOptions samples int64 exemplars int64 histograms int64 @@ -284,6 +286,10 @@ type timestampTracker struct { highestRecvTimestamp *maxTimestamp } +func (t *timestampTracker) SetOptions(opts *storage.AppendOptions) { + t.appendOptions = opts +} + // Append implements storage.Appender. func (t *timestampTracker) Append(_ storage.SeriesRef, _ labels.Labels, ts int64, _ float64) (storage.SeriesRef, error) { t.samples++ @@ -306,14 +312,29 @@ func (t *timestampTracker) AppendHistogram(_ storage.SeriesRef, _ labels.Labels, return 0, nil } -func (t *timestampTracker) UpdateMetadata(_ storage.SeriesRef, _ labels.Labels, _ metadata.Metadata) (storage.SeriesRef, error) { - // TODO: Add and increment a `metadata` field when we get around to wiring metadata in remote_write. - // UpadteMetadata is no-op for remote write (where timestampTracker is being used) for now. 
+func (t *timestampTracker) AppendCTZeroSample(_ storage.SeriesRef, _ labels.Labels, _, ct int64) (storage.SeriesRef, error) { + t.samples++ + if ct > t.highestTimestamp { + // Theoretically, we should never see a CT zero sample with a timestamp higher than the highest timestamp we've seen so far. + // However, we're not going to enforce that here, as it is not the responsibility of the tracker to enforce this. + t.highestTimestamp = ct + } return 0, nil } -func (t *timestampTracker) AppendCTZeroSample(_ storage.SeriesRef, _ labels.Labels, _, _ int64) (storage.SeriesRef, error) { - // AppendCTZeroSample is no-op for remote-write for now. +func (t *timestampTracker) AppendHistogramCTZeroSample(_ storage.SeriesRef, _ labels.Labels, _, ct int64, _ *histogram.Histogram, _ *histogram.FloatHistogram) (storage.SeriesRef, error) { + t.histograms++ + if ct > t.highestTimestamp { + // Theoretically, we should never see a CT zero sample with a timestamp higher than the highest timestamp we've seen so far. + // However, we're not going to enforce that here, as it is not the responsibility of the tracker to enforce this. + t.highestTimestamp = ct + } + return 0, nil +} + +func (t *timestampTracker) UpdateMetadata(_ storage.SeriesRef, _ labels.Labels, _ metadata.Metadata) (storage.SeriesRef, error) { + // TODO: Add and increment a `metadata` field when we get around to wiring metadata in remote_write. + // UpdateMetadata is no-op for remote write (where timestampTracker is being used) for now. return 0, nil } diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go b/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go index 43c72a3571e..99e4392ff5e 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go @@ -18,12 +18,11 @@ import ( "errors" "fmt" "io" + "log/slog" "net/http" "strings" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/gogo/protobuf/proto" "github.com/golang/snappy" "github.com/prometheus/client_golang/prometheus" @@ -42,7 +41,7 @@ import ( ) type writeHandler struct { - logger log.Logger + logger *slog.Logger appendable storage.Appendable samplesWithInvalidLabelsTotal prometheus.Counter @@ -58,7 +57,7 @@ const maxAheadTime = 10 * time.Minute // // NOTE(bwplotka): When accepting v2 proto and spec, partial writes are possible // as per https://prometheus.io/docs/specs/remote_write_spec_2_0/#partial-write. 
-func NewWriteHandler(logger log.Logger, reg prometheus.Registerer, appendable storage.Appendable, acceptedProtoMsgs []config.RemoteWriteProtoMsg) http.Handler { +func NewWriteHandler(logger *slog.Logger, reg prometheus.Registerer, appendable storage.Appendable, acceptedProtoMsgs []config.RemoteWriteProtoMsg) http.Handler { protoMsgs := map[config.RemoteWriteProtoMsg]struct{}{} for _, acc := range acceptedProtoMsgs { protoMsgs[acc] = struct{}{} @@ -119,7 +118,7 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { msgType, err := h.parseProtoMsg(contentType) if err != nil { - level.Error(h.logger).Log("msg", "Error decoding remote write request", "err", err) + h.logger.Error("Error decoding remote write request", "err", err) http.Error(w, err.Error(), http.StatusUnsupportedMediaType) return } @@ -131,7 +130,7 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } return ret }()) - level.Error(h.logger).Log("msg", "Error decoding remote write request", "err", err) + h.logger.Error("Error decoding remote write request", "err", err) http.Error(w, err.Error(), http.StatusUnsupportedMediaType) } @@ -142,14 +141,14 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { // We could give http.StatusUnsupportedMediaType, but let's assume snappy by default. } else if enc != string(SnappyBlockCompression) { err := fmt.Errorf("%v encoding (compression) is not accepted by this server; only %v is acceptable", enc, SnappyBlockCompression) - level.Error(h.logger).Log("msg", "Error decoding remote write request", "err", err) + h.logger.Error("Error decoding remote write request", "err", err) http.Error(w, err.Error(), http.StatusUnsupportedMediaType) } // Read the request body. body, err := io.ReadAll(r.Body) if err != nil { - level.Error(h.logger).Log("msg", "Error decoding remote write request", "err", err.Error()) + h.logger.Error("Error decoding remote write request", "err", err.Error()) http.Error(w, err.Error(), http.StatusBadRequest) return } @@ -157,7 +156,7 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { decompressed, err := snappy.Decode(nil, body) if err != nil { // TODO(bwplotka): Add more context to responded error? - level.Error(h.logger).Log("msg", "Error decompressing remote write request", "err", err.Error()) + h.logger.Error("Error decompressing remote write request", "err", err.Error()) http.Error(w, err.Error(), http.StatusBadRequest) return } @@ -169,7 +168,7 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { var req prompb.WriteRequest if err := proto.Unmarshal(decompressed, &req); err != nil { // TODO(bwplotka): Add more context to responded error? 
- level.Error(h.logger).Log("msg", "Error decoding v1 remote write request", "protobuf_message", msgType, "err", err.Error()) + h.logger.Error("Error decoding v1 remote write request", "protobuf_message", msgType, "err", err.Error()) http.Error(w, err.Error(), http.StatusBadRequest) return } @@ -180,7 +179,7 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { http.Error(w, err.Error(), http.StatusBadRequest) return default: - level.Error(h.logger).Log("msg", "Error while remote writing the v1 request", "err", err.Error()) + h.logger.Error("Error while remote writing the v1 request", "err", err.Error()) http.Error(w, err.Error(), http.StatusInternalServerError) return } @@ -193,7 +192,7 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { var req writev2.Request if err := proto.Unmarshal(decompressed, &req); err != nil { // TODO(bwplotka): Add more context to responded error? - level.Error(h.logger).Log("msg", "Error decoding v2 remote write request", "protobuf_message", msgType, "err", err.Error()) + h.logger.Error("Error decoding v2 remote write request", "protobuf_message", msgType, "err", err.Error()) http.Error(w, err.Error(), http.StatusBadRequest) return } @@ -205,7 +204,7 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if err != nil { if errHTTPCode/5 == 100 { // 5xx - level.Error(h.logger).Log("msg", "Error while remote writing the v2 request", "err", err.Error()) + h.logger.Error("Error while remote writing the v2 request", "err", err.Error()) } http.Error(w, err.Error(), errHTTPCode) return @@ -241,11 +240,11 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err // TODO(bwplotka): Even as per 1.0 spec, this should be a 400 error, while other samples are // potentially written. Perhaps unify with fixed writeV2 implementation a bit. 
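For context on the handler above: the v1 decode path is snappy block decompression followed by a protobuf unmarshal into prompb.WriteRequest, as the calls and error messages indicate. A trimmed, hypothetical sketch of that flow outside the handler:

```go
package example

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
	"github.com/golang/snappy"

	"github.com/prometheus/prometheus/prompb"
)

// decodeV1WriteRequest mirrors the handler's decode path: snappy block
// decompression of the HTTP body, then a protobuf unmarshal into the v1
// remote-write message.
func decodeV1WriteRequest(body []byte) (*prompb.WriteRequest, error) {
	decompressed, err := snappy.Decode(nil, body)
	if err != nil {
		return nil, fmt.Errorf("decompressing remote write request: %w", err)
	}

	var req prompb.WriteRequest
	if err := proto.Unmarshal(decompressed, &req); err != nil {
		return nil, fmt.Errorf("decoding v1 remote write request: %w", err)
	}
	return &req, nil
}
```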
if !ls.Has(labels.MetricName) || !ls.IsValid(model.NameValidationScheme) { - level.Warn(h.logger).Log("msg", "Invalid metric names or labels", "got", ls.String()) + h.logger.Warn("Invalid metric names or labels", "got", ls.String()) samplesWithInvalidLabels++ continue } else if duplicateLabel, hasDuplicate := ls.HasDuplicateLabelNames(); hasDuplicate { - level.Warn(h.logger).Log("msg", "Invalid labels for series.", "labels", ls.String(), "duplicated_label", duplicateLabel) + h.logger.Warn("Invalid labels for series.", "labels", ls.String(), "duplicated_label", duplicateLabel) samplesWithInvalidLabels++ continue } @@ -261,10 +260,10 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err switch { case errors.Is(err, storage.ErrOutOfOrderExemplar): outOfOrderExemplarErrs++ - level.Debug(h.logger).Log("msg", "Out of order exemplar", "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e)) + h.logger.Debug("Out of order exemplar", "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e)) default: // Since exemplar storage is still experimental, we don't fail the request on ingestion errors - level.Debug(h.logger).Log("msg", "Error while adding exemplar in AppendExemplar", "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e), "err", err) + h.logger.Debug("Error while adding exemplar in AppendExemplar", "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e), "err", err) } } } @@ -276,7 +275,7 @@ func (h *writeHandler) write(ctx context.Context, req *prompb.WriteRequest) (err } if outOfOrderExemplarErrs > 0 { - _ = level.Warn(h.logger).Log("msg", "Error on ingesting out-of-order exemplars", "num_dropped", outOfOrderExemplarErrs) + h.logger.Warn("Error on ingesting out-of-order exemplars", "num_dropped", outOfOrderExemplarErrs) } if samplesWithInvalidLabels > 0 { h.samplesWithInvalidLabelsTotal.Add(float64(samplesWithInvalidLabels)) @@ -293,7 +292,7 @@ func (h *writeHandler) appendV1Samples(app storage.Appender, ss []prompb.Sample, if errors.Is(err, storage.ErrOutOfOrderSample) || errors.Is(err, storage.ErrOutOfBounds) || errors.Is(err, storage.ErrDuplicateSampleForTimestamp) { - level.Error(h.logger).Log("msg", "Out of order sample from remote write", "err", err.Error(), "series", labels.String(), "timestamp", s.Timestamp) + h.logger.Error("Out of order sample from remote write", "err", err.Error(), "series", labels.String(), "timestamp", s.Timestamp) } return err } @@ -315,7 +314,7 @@ func (h *writeHandler) appendV1Histograms(app storage.Appender, hh []prompb.Hist if errors.Is(err, storage.ErrOutOfOrderSample) || errors.Is(err, storage.ErrOutOfBounds) || errors.Is(err, storage.ErrDuplicateSampleForTimestamp) { - level.Error(h.logger).Log("msg", "Out of order histogram from remote write", "err", err.Error(), "series", labels.String(), "timestamp", hp.Timestamp) + h.logger.Error("Out of order histogram from remote write", "err", err.Error(), "series", labels.String(), "timestamp", hp.Timestamp) } return err } @@ -345,7 +344,7 @@ func (h *writeHandler) writeV2(ctx context.Context, req *writev2.Request) (_ Wri // On 5xx, we always rollback, because we expect // sender to retry and TSDB is not idempotent. 
if rerr := app.Rollback(); rerr != nil { - level.Error(h.logger).Log("msg", "writev2 rollback failed on retry-able error", "err", rerr) + h.logger.Error("writev2 rollback failed on retry-able error", "err", rerr) } return WriteResponseStats{}, errHTTPCode, err } @@ -407,7 +406,7 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs * errors.Is(err, storage.ErrDuplicateSampleForTimestamp) || errors.Is(err, storage.ErrTooOldSample) { // TODO(bwplotka): Not too spammy log? - level.Error(h.logger).Log("msg", "Out of order sample from remote write", "err", err.Error(), "series", ls.String(), "timestamp", s.Timestamp) + h.logger.Error("Out of order sample from remote write", "err", err.Error(), "series", ls.String(), "timestamp", s.Timestamp) badRequestErrs = append(badRequestErrs, fmt.Errorf("%w for series %v", err, ls.String())) continue } @@ -432,7 +431,7 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs * errors.Is(err, storage.ErrOutOfBounds) || errors.Is(err, storage.ErrDuplicateSampleForTimestamp) { // TODO(bwplotka): Not too spammy log? - level.Error(h.logger).Log("msg", "Out of order histogram from remote write", "err", err.Error(), "series", ls.String(), "timestamp", hp.Timestamp) + h.logger.Error("Out of order histogram from remote write", "err", err.Error(), "series", ls.String(), "timestamp", hp.Timestamp) badRequestErrs = append(badRequestErrs, fmt.Errorf("%w for series %v", err, ls.String())) continue } @@ -450,18 +449,18 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs * // Handle append error. if errors.Is(err, storage.ErrOutOfOrderExemplar) { outOfOrderExemplarErrs++ // Maintain old metrics, but technically not needed, given we fail here. - level.Error(h.logger).Log("msg", "Out of order exemplar", "err", err.Error(), "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e)) + h.logger.Error("Out of order exemplar", "err", err.Error(), "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e)) badRequestErrs = append(badRequestErrs, fmt.Errorf("%w for series %v", err, ls.String())) continue } // TODO(bwplotka): Add strict mode which would trigger rollback of everything if needed. // For now we keep the previously released flow (just error not debug leve) of dropping them without rollback and 5xx. - level.Error(h.logger).Log("msg", "failed to ingest exemplar, emitting error log, but no error for PRW caller", "err", err.Error(), "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e)) + h.logger.Error("failed to ingest exemplar, emitting error log, but no error for PRW caller", "err", err.Error(), "series", ls.String(), "exemplar", fmt.Sprintf("%+v", e)) } m := ts.ToMetadata(req.Symbols) if _, err = app.UpdateMetadata(ref, ls, m); err != nil { - level.Debug(h.logger).Log("msg", "error while updating metadata from remote write", "err", err) + h.logger.Debug("error while updating metadata from remote write", "err", err) // Metadata is attached to each series, so since Prometheus does not reject sample without metadata information, // we don't report remote write error either. We increment metric instead. 
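The v2 append loop above implements partial writes: append errors attributable to the sender (out-of-order or too-old samples, duplicate timestamps, out-of-order exemplars) are logged, collected as bad-request errors, and ingestion continues, while other errors abort the batch. A condensed sketch of that classification; the helper name is made up:

```go
package example

import (
	"errors"

	"github.com/prometheus/prometheus/storage"
)

// isClientAppendError reports whether an append error should be attributed to
// the sender (collected and surfaced as a 400) rather than failing the batch.
func isClientAppendError(err error) bool {
	return errors.Is(err, storage.ErrOutOfOrderSample) ||
		errors.Is(err, storage.ErrOutOfBounds) ||
		errors.Is(err, storage.ErrDuplicateSampleForTimestamp) ||
		errors.Is(err, storage.ErrTooOldSample) ||
		errors.Is(err, storage.ErrOutOfOrderExemplar)
}
```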
samplesWithoutMetadata += rs.AllSamples() - allSamplesSoFar @@ -469,7 +468,7 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs * } if outOfOrderExemplarErrs > 0 { - level.Warn(h.logger).Log("msg", "Error on ingesting out-of-order exemplars", "num_dropped", outOfOrderExemplarErrs) + h.logger.Warn("Error on ingesting out-of-order exemplars", "num_dropped", outOfOrderExemplarErrs) } h.samplesWithInvalidLabelsTotal.Add(float64(samplesWithInvalidLabels)) @@ -482,7 +481,7 @@ func (h *writeHandler) appendV2(app storage.Appender, req *writev2.Request, rs * // NewOTLPWriteHandler creates a http.Handler that accepts OTLP write requests and // writes them to the provided appendable. -func NewOTLPWriteHandler(logger log.Logger, appendable storage.Appendable, configFunc func() config.Config, enableCTZeroIngestion bool, validIntervalCTZeroIngestion time.Duration) http.Handler { +func NewOTLPWriteHandler(logger *slog.Logger, appendable storage.Appendable, configFunc func() config.Config, enableCTZeroIngestion bool, validIntervalCTZeroIngestion time.Duration) http.Handler { rwHandler := &writeHandler{ logger: logger, appendable: appendable, @@ -498,7 +497,7 @@ func NewOTLPWriteHandler(logger log.Logger, appendable storage.Appendable, confi } type otlpWriteHandler struct { - logger log.Logger + logger *slog.Logger rwHandler *writeHandler configFunc func() config.Config enableCTZeroIngestion bool @@ -508,7 +507,7 @@ type otlpWriteHandler struct { func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { req, err := DecodeOTLPWriteRequest(r) if err != nil { - level.Error(h.logger).Log("msg", "Error decoding remote write request", "err", err.Error()) + h.logger.Error("Error decoding remote write request", "err", err.Error()) http.Error(w, err.Error(), http.StatusBadRequest) return } @@ -523,11 +522,11 @@ func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { ValidIntervalCreatedTimestampZeroIngestion: h.validIntervalCTZeroIngestion, }, h.logger) if err != nil { - level.Warn(h.logger).Log("msg", "Error translating OTLP metrics to Prometheus write request", "err", err) + h.logger.Warn("Error translating OTLP metrics to Prometheus write request", "err", err) } ws, _ := annots.AsStrings("", 0, 0) if len(ws) > 0 { - level.Warn(h.logger).Log("msg", "Warnings translating OTLP metrics to Prometheus write request", "warnings", ws) + h.logger.Warn("Warnings translating OTLP metrics to Prometheus write request", "warnings", ws) } err = h.rwHandler.write(r.Context(), &prompb.WriteRequest{ @@ -541,7 +540,7 @@ func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { http.Error(w, err.Error(), http.StatusBadRequest) return default: - level.Error(h.logger).Log("msg", "Error appending remote write", "err", err.Error()) + h.logger.Error("Error appending remote write", "err", err.Error()) http.Error(w, err.Error(), http.StatusInternalServerError) return } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/block.go b/vendor/github.com/prometheus/prometheus/tsdb/block.go index 20d66763c3e..bfb04c3f435 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/block.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/block.go @@ -20,16 +20,17 @@ import ( "errors" "fmt" "io" + "log/slog" "os" "path/filepath" "slices" "sync" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/oklog/ulid" + "github.com/prometheus/common/promslog" + "github.com/prometheus/prometheus/model/labels" 
"github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" @@ -282,7 +283,7 @@ func readMetaFile(dir string) (*BlockMeta, int64, error) { return &m, int64(len(b)), nil } -func writeMetaFile(logger log.Logger, dir string, meta *BlockMeta) (int64, error) { +func writeMetaFile(logger *slog.Logger, dir string, meta *BlockMeta) (int64, error) { meta.Version = metaVersion1 // Make any changes to the file appear atomic. @@ -290,7 +291,7 @@ func writeMetaFile(logger log.Logger, dir string, meta *BlockMeta) (int64, error tmp := path + ".tmp" defer func() { if err := os.RemoveAll(tmp); err != nil { - level.Error(logger).Log("msg", "remove tmp file", "err", err.Error()) + logger.Error("remove tmp file", "err", err.Error()) } }() @@ -336,7 +337,7 @@ type Block struct { indexr IndexReader tombstones tombstones.Reader - logger log.Logger + logger *slog.Logger numBytesChunks int64 numBytesIndex int64 @@ -346,14 +347,14 @@ type Block struct { // OpenBlock opens the block in the directory. It can be passed a chunk pool, which is used // to instantiate chunk structs. -func OpenBlock(logger log.Logger, dir string, pool chunkenc.Pool) (pb *Block, err error) { +func OpenBlock(logger *slog.Logger, dir string, pool chunkenc.Pool) (pb *Block, err error) { return OpenBlockWithOptions(logger, dir, pool, nil, DefaultPostingsForMatchersCacheTTL, DefaultPostingsForMatchersCacheMaxItems, DefaultPostingsForMatchersCacheMaxBytes, DefaultPostingsForMatchersCacheForce) } // OpenBlockWithOptions is like OpenBlock but allows to pass a cache provider and sharding function. -func OpenBlockWithOptions(logger log.Logger, dir string, pool chunkenc.Pool, cache index.ReaderCacheProvider, postingsCacheTTL time.Duration, postingsCacheMaxItems int, postingsCacheMaxBytes int64, postingsCacheForce bool) (pb *Block, err error) { +func OpenBlockWithOptions(logger *slog.Logger, dir string, pool chunkenc.Pool, cache index.ReaderCacheProvider, postingsCacheTTL time.Duration, postingsCacheMaxItems int, postingsCacheMaxBytes int64, postingsCacheForce bool) (pb *Block, err error) { if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } var closers []io.Closer defer func() { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/blockwriter.go b/vendor/github.com/prometheus/prometheus/tsdb/blockwriter.go index 232ec2b9148..63f82e28df0 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/blockwriter.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/blockwriter.go @@ -17,11 +17,10 @@ import ( "context" "errors" "fmt" + "log/slog" "math" "os" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/oklog/ulid" "github.com/prometheus/prometheus/model/timestamp" @@ -31,7 +30,7 @@ import ( // BlockWriter is a block writer that allows appending and flushing series to disk. type BlockWriter struct { - logger log.Logger + logger *slog.Logger destinationDir string head *Head @@ -50,7 +49,7 @@ var ErrNoSeriesAppended = errors.New("no series appended, aborting") // contains anything at all. It is the caller's responsibility to // ensure that the resulting blocks do not overlap etc. // Writer ensures the block flush is atomic (via rename). 
-func NewBlockWriter(logger log.Logger, dir string, blockSize int64) (*BlockWriter, error) { +func NewBlockWriter(logger *slog.Logger, dir string, blockSize int64) (*BlockWriter, error) { w := &BlockWriter{ logger: logger, destinationDir: dir, @@ -95,7 +94,7 @@ func (w *BlockWriter) Flush(ctx context.Context) (ulid.ULID, error) { // Add +1 millisecond to block maxt because block intervals are half-open: [b.MinTime, b.MaxTime). // Because of this block intervals are always +1 than the total samples it includes. maxt := w.head.MaxTime() + 1 - level.Info(w.logger).Log("msg", "flushing", "series_count", w.head.NumSeries(), "mint", timestamp.Time(mint), "maxt", timestamp.Time(maxt)) + w.logger.Info("flushing", "series_count", w.head.NumSeries(), "mint", timestamp.Time(mint), "maxt", timestamp.Time(maxt)) compactor, err := NewLeveledCompactor(ctx, nil, @@ -121,7 +120,7 @@ func (w *BlockWriter) Flush(ctx context.Context) (ulid.ULID, error) { func (w *BlockWriter) Close() error { defer func() { if err := os.RemoveAll(w.chunkDir); err != nil { - level.Error(w.logger).Log("msg", "error in deleting BlockWriter files", "err", err) + w.logger.Error("error in deleting BlockWriter files", "err", err) } }() return w.head.Close() diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/bstream.go b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/bstream.go index 8cc59f3ea76..6e01798f720 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/bstream.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/bstream.go @@ -86,8 +86,8 @@ func (b *bstream) writeBit(bit bit) { func (b *bstream) writeByte(byt byte) { if b.count == 0 { - b.stream = append(b.stream, 0) - b.count = 8 + b.stream = append(b.stream, byt) + return } i := len(b.stream) - 1 @@ -95,10 +95,8 @@ func (b *bstream) writeByte(byt byte) { // Complete the last byte with the leftmost b.count bits from byt. b.stream[i] |= byt >> (8 - b.count) - b.stream = append(b.stream, 0) - i++ // Write the remainder, if any. - b.stream[i] = byt << b.count + b.stream = append(b.stream, byt<<b.count) } @@ -191,8 +191,8 @@ func (a *xorAppender) Append(t int64, v float64) { case dod == 0: a.b.writeBit(zero) case bitRange(dod, 14): - a.b.writeBits(0b10, 2) - a.b.writeBits(uint64(dod), 14) + a.b.writeByte(0b10<<6 | (uint8(dod>>8) & (1<<6 - 1))) // 0b10 size code combined with 6 bits of dod. + a.b.writeByte(uint8(dod)) // Bottom 8 bits of dod. case bitRange(dod, 17): a.b.writeBits(0b110, 3) a.b.writeBits(uint64(dod), 17) diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunks/chunk_write_queue.go b/vendor/github.com/prometheus/prometheus/tsdb/chunks/chunk_write_queue.go index 86bd60381f8..19ff19ce136 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/chunks/chunk_write_queue.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunks/chunk_write_queue.go @@ -24,7 +24,7 @@ import ( ) const ( - // Minimum recorded peak since the last shrinking of chunkWriteQueue.chunkrefMap to shrink it again. + // Minimum recorded peak since the last shrinking of chunkWriteQueue.chunkRefMap to shrink it again. chunkRefMapShrinkThreshold = 1000 // Minimum interval between shrinking of chunkWriteQueue.chunkRefMap.
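Editor's note: the xorAppender hunk above replaces two writeBits calls with two whole-byte writes for the 14-bit delta-of-delta case. A standalone sketch of the bit packing it relies on; the helper name and sample value are illustrative, not from the patch:

package main

import "fmt"

// packDod14 mirrors the new encoding: a 2-bit 0b10 size code in the top bits of
// the first byte, the top 6 bits of the 14-bit dod below it, and the bottom
// 8 bits of dod in the second byte.
func packDod14(dod int64) [2]byte {
	hi := byte(0b10<<6) | (byte(dod>>8) & 0x3f) // size code + dod bits 13..8
	lo := byte(dod)                             // dod bits 7..0
	return [2]byte{hi, lo}
}

func main() {
	b := packDod14(1000) // 1000 fits in 14 bits, so Append would take this branch.
	fmt.Printf("%08b %08b\n", b[0], b[1])
}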
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/compact.go b/vendor/github.com/prometheus/prometheus/tsdb/compact.go index 9b57f28edd2..c176c804839 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/compact.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/compact.go @@ -19,16 +19,16 @@ import ( "errors" "fmt" "io" + "log/slog" "os" "path/filepath" "slices" "sync" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/oklog/ulid" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/promslog" "go.uber.org/atomic" "golang.org/x/sync/semaphore" @@ -88,7 +88,7 @@ type Compactor interface { // LeveledCompactor implements the Compactor interface. type LeveledCompactor struct { metrics *CompactorMetrics - logger log.Logger + logger *slog.Logger ranges []int64 chunkPool chunkenc.Pool ctx context.Context @@ -176,7 +176,7 @@ type LeveledCompactorOptions struct { EnableOverlappingCompaction bool } -func NewLeveledCompactorWithChunkSize(ctx context.Context, r prometheus.Registerer, l log.Logger, ranges []int64, pool chunkenc.Pool, maxBlockChunkSegmentSize int64, mergeFunc storage.VerticalChunkSeriesMergeFunc) (*LeveledCompactor, error) { +func NewLeveledCompactorWithChunkSize(ctx context.Context, r prometheus.Registerer, l *slog.Logger, ranges []int64, pool chunkenc.Pool, maxBlockChunkSegmentSize int64, mergeFunc storage.VerticalChunkSeriesMergeFunc) (*LeveledCompactor, error) { return NewLeveledCompactorWithOptions(ctx, r, l, ranges, pool, LeveledCompactorOptions{ MaxBlockChunkSegmentSize: maxBlockChunkSegmentSize, MergeFunc: mergeFunc, @@ -184,14 +184,14 @@ func NewLeveledCompactorWithChunkSize(ctx context.Context, r prometheus.Register }) } -func NewLeveledCompactor(ctx context.Context, r prometheus.Registerer, l log.Logger, ranges []int64, pool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc) (*LeveledCompactor, error) { +func NewLeveledCompactor(ctx context.Context, r prometheus.Registerer, l *slog.Logger, ranges []int64, pool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc) (*LeveledCompactor, error) { return NewLeveledCompactorWithOptions(ctx, r, l, ranges, pool, LeveledCompactorOptions{ MergeFunc: mergeFunc, EnableOverlappingCompaction: true, }) } -func NewLeveledCompactorWithOptions(ctx context.Context, r prometheus.Registerer, l log.Logger, ranges []int64, pool chunkenc.Pool, opts LeveledCompactorOptions) (*LeveledCompactor, error) { +func NewLeveledCompactorWithOptions(ctx context.Context, r prometheus.Registerer, l *slog.Logger, ranges []int64, pool chunkenc.Pool, opts LeveledCompactorOptions) (*LeveledCompactor, error) { if len(ranges) == 0 { return nil, fmt.Errorf("at least one range must be provided") } @@ -199,7 +199,7 @@ func NewLeveledCompactorWithOptions(ctx context.Context, r prometheus.Registerer pool = chunkenc.NewPool() } if l == nil { - l = log.NewNopLogger() + l = promslog.NewNopLogger() } mergeFunc := opts.MergeFunc if mergeFunc == nil { @@ -550,8 +550,8 @@ func (c *LeveledCompactor) CompactWithBlockPopulator(dest string, dirs []string, meta := outBlocks[ix].meta if meta.Stats.NumSamples == 0 { - level.Info(c.logger).Log( - "msg", "compact blocks resulted in empty block", + c.logger.Info( + "compact blocks resulted in empty block", "count", len(blocks), "sources", fmt.Sprintf("%v", uids), "duration", time.Since(start), @@ -561,8 +561,8 @@ func (c *LeveledCompactor) CompactWithBlockPopulator(dest string, dirs []string, allOutputBlocksAreEmpty = false ulids[ix] = 
outBlocks[ix].meta.ULID - level.Info(c.logger).Log( - "msg", "compact blocks", + c.logger.Info( + "compact blocks", "count", len(blocks), "mint", meta.MinTime, "maxt", meta.MaxTime, @@ -580,8 +580,8 @@ func (c *LeveledCompactor) CompactWithBlockPopulator(dest string, dirs []string, b.meta.Compaction.Deletable = true n, err := writeMetaFile(c.logger, b.dir, &b.meta) if err != nil { - level.Error(c.logger).Log( - "msg", "Failed to write 'Deletable' to meta file after compaction", + c.logger.Error( + "Failed to write 'Deletable' to meta file after compaction", "ulid", b.meta.ULID, ) } @@ -680,12 +680,12 @@ func (c *LeveledCompactor) compactOOO(dest string, oooHead *OOOCompactionHead, s for _, ob := range obs { if ob.tmpDir != "" { if removeErr := os.RemoveAll(ob.tmpDir); removeErr != nil { - level.Error(c.logger).Log("msg", "Failed to remove temp folder after failed compaction", "dir", ob.tmpDir, "err", removeErr.Error()) + c.logger.Error("Failed to remove temp folder after failed compaction", "dir", ob.tmpDir, "err", removeErr.Error()) } } if ob.blockDir != "" { if removeErr := os.RemoveAll(ob.blockDir); removeErr != nil { - level.Error(c.logger).Log("msg", "Failed to remove block folder after failed compaction", "dir", ob.blockDir, "err", removeErr.Error()) + c.logger.Error("Failed to remove block folder after failed compaction", "dir", ob.blockDir, "err", removeErr.Error()) } } } @@ -700,8 +700,8 @@ func (c *LeveledCompactor) compactOOO(dest string, oooHead *OOOCompactionHead, s meta := outBlocks[ix][jx].meta if meta.Stats.NumSamples != 0 { noOOOBlock = false - level.Info(c.logger).Log( - "msg", "compact ooo head", + c.logger.Info( + "compact ooo head", "mint", meta.MinTime, "maxt", meta.MaxTime, "ulid", meta.ULID, @@ -716,8 +716,8 @@ func (c *LeveledCompactor) compactOOO(dest string, oooHead *OOOCompactionHead, s } if noOOOBlock { - level.Info(c.logger).Log( - "msg", "compact ooo head resulted in no blocks", + c.logger.Info( + "compact ooo head resulted in no blocks", "duration", time.Since(start), ) return nil, nil @@ -754,8 +754,8 @@ func (c *LeveledCompactor) Write(dest string, b BlockReader, mint, maxt int64, b } if meta.Stats.NumSamples == 0 { - level.Info(c.logger).Log( - "msg", "write block resulted in empty block", + c.logger.Info( + "write block resulted in empty block", "mint", meta.MinTime, "maxt", meta.MaxTime, "duration", time.Since(start), @@ -763,8 +763,8 @@ func (c *LeveledCompactor) Write(dest string, b BlockReader, mint, maxt int64, b return nil, nil } - level.Info(c.logger).Log( - "msg", "write block", + c.logger.Info( + "write block", "mint", meta.MinTime, "maxt", meta.MaxTime, "ulid", meta.ULID, @@ -804,7 +804,7 @@ func (c *LeveledCompactor) write(dest string, outBlocks []shardedBlock, blockPop if ob.tmpDir != "" { // RemoveAll returns no error when tmp doesn't exist so it is safe to always run it. if removeErr := os.RemoveAll(ob.tmpDir); removeErr != nil { - level.Error(c.logger).Log("msg", "Failed to remove temp folder after failed compaction", "dir", ob.tmpDir, "err", removeErr.Error()) + c.logger.Error("Failed to remove temp folder after failed compaction", "dir", ob.tmpDir, "err", removeErr.Error()) } } @@ -814,7 +814,7 @@ func (c *LeveledCompactor) write(dest string, outBlocks []shardedBlock, blockPop if err != nil && ob.blockDir != "" { // RemoveAll returns no error when tmp doesn't exist so it is safe to always run it. 
if removeErr := os.RemoveAll(ob.blockDir); removeErr != nil { - level.Error(c.logger).Log("msg", "Failed to remove block folder after failed compaction", "dir", ob.blockDir, "err", removeErr.Error()) + c.logger.Error("Failed to remove block folder after failed compaction", "dir", ob.blockDir, "err", removeErr.Error()) } } } @@ -939,7 +939,7 @@ func (c *LeveledCompactor) write(dest string, outBlocks []shardedBlock, blockPop return nil } -func debugOutOfOrderChunks(lbls labels.Labels, chks []chunks.Meta, logger log.Logger) { +func debugOutOfOrderChunks(lbls labels.Labels, chks []chunks.Meta, logger *slog.Logger) { if len(chks) <= 1 { return } @@ -955,7 +955,6 @@ func debugOutOfOrderChunks(lbls labels.Labels, chks []chunks.Meta, logger log.Lo // Looks like the chunk is out of order. logValues := []any{ - "msg", "found out-of-order chunk when compacting", "num_chunks_for_series", len(chks), "index", i, "labels", lbls.String(), @@ -983,7 +982,7 @@ func debugOutOfOrderChunks(lbls labels.Labels, chks []chunks.Meta, logger log.Lo ) } - level.Warn(logger).Log(logValues...) + logger.Warn("found out-of-order chunk when compacting", logValues...) } } @@ -992,7 +991,7 @@ func timeFromMillis(ms int64) time.Time { } type BlockPopulator interface { - PopulateBlock(ctx context.Context, metrics *CompactorMetrics, logger log.Logger, chunkPool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc, concurrencyOpts LeveledCompactorConcurrencyOptions, blocks []BlockReader, minT, maxT int64, outBlocks []shardedBlock, postingsFunc IndexReaderPostingsFunc) error + PopulateBlock(ctx context.Context, metrics *CompactorMetrics, logger *slog.Logger, chunkPool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc, concurrencyOpts LeveledCompactorConcurrencyOptions, blocks []BlockReader, minT, maxT int64, outBlocks []shardedBlock, postingsFunc IndexReaderPostingsFunc) error } // IndexReaderPostingsFunc is a function to get a sorted posting iterator from a given index reader. @@ -1015,7 +1014,7 @@ type DefaultBlockPopulator struct{} // It expects sorted blocks input by mint. // If there is more than 1 output block, each output block will only contain series that hash into its shard // (based on total number of output blocks). 
-func (c DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *CompactorMetrics, logger log.Logger, chunkPool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc, concurrencyOpts LeveledCompactorConcurrencyOptions, blocks []BlockReader, minT, maxT int64, outBlocks []shardedBlock, postingsFunc IndexReaderPostingsFunc) (err error) { +func (c DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *CompactorMetrics, logger *slog.Logger, chunkPool chunkenc.Pool, mergeFunc storage.VerticalChunkSeriesMergeFunc, concurrencyOpts LeveledCompactorConcurrencyOptions, blocks []BlockReader, minT, maxT int64, outBlocks []shardedBlock, postingsFunc IndexReaderPostingsFunc) (err error) { if len(blocks) == 0 { return errors.New("cannot populate block(s) from no readers") } @@ -1049,7 +1048,7 @@ func (c DefaultBlockPopulator) PopulateBlock(ctx context.Context, metrics *Compa if i > 0 && b.Meta().MinTime < globalMaxt { metrics.OverlappingBlocks.Inc() overlapping = true - level.Info(logger).Log("msg", "Found overlapping blocks during compaction") + logger.Info("Found overlapping blocks during compaction") } if b.Meta().MaxTime > globalMaxt { globalMaxt = b.Meta().MaxTime @@ -1319,7 +1318,7 @@ func populateSymbols(ctx context.Context, mergeFunc storage.VerticalChunkSeriesM } // Returns opened blocks, and blocks that should be closed (also returned in case of error). -func openBlocksForCompaction(dirs []string, open []*Block, logger log.Logger, pool chunkenc.Pool, concurrency int) (blocks, blocksToClose []*Block, _ error) { +func openBlocksForCompaction(dirs []string, open []*Block, logger *slog.Logger, pool chunkenc.Pool, concurrency int) (blocks, blocksToClose []*Block, _ error) { blocks = make([]*Block, 0, len(dirs)) blocksToClose = make([]*Block, 0, len(dirs)) diff --git a/vendor/github.com/prometheus/prometheus/tsdb/db.go b/vendor/github.com/prometheus/prometheus/tsdb/db.go index 9f40a2c4999..4d721dca05f 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/db.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/db.go @@ -20,6 +20,7 @@ import ( "fmt" "io" "io/fs" + "log/slog" "math" "math/rand" "os" @@ -29,10 +30,9 @@ import ( "sync" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/oklog/ulid" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/promslog" "go.uber.org/atomic" "golang.org/x/sync/errgroup" @@ -274,7 +274,7 @@ type Options struct { BlockChunkQuerierFunc BlockChunkQuerierFunc } -type NewCompactorFunc func(ctx context.Context, r prometheus.Registerer, l log.Logger, ranges []int64, pool chunkenc.Pool, opts *Options) (Compactor, error) +type NewCompactorFunc func(ctx context.Context, r prometheus.Registerer, l *slog.Logger, ranges []int64, pool chunkenc.Pool, opts *Options) (Compactor, error) type BlocksToDeleteFunc func(blocks []*Block) map[ulid.ULID]struct{} @@ -288,7 +288,7 @@ type DB struct { dir string locker *tsdbutil.DirLocker - logger log.Logger + logger *slog.Logger metrics *dbMetrics opts *Options chunkPool chunkenc.Pool @@ -479,7 +479,7 @@ var ErrClosed = errors.New("db already closed") // Current implementation doesn't support concurrency so // all API calls should happen in the same go routine. type DBReadOnly struct { - logger log.Logger + logger *slog.Logger dir string sandboxDir string closers []io.Closer @@ -487,7 +487,7 @@ type DBReadOnly struct { } // OpenDBReadOnly opens DB in the given directory for read only operations. 
-func OpenDBReadOnly(dir, sandboxDirRoot string, l log.Logger) (*DBReadOnly, error) { +func OpenDBReadOnly(dir, sandboxDirRoot string, l *slog.Logger) (*DBReadOnly, error) { if _, err := os.Stat(dir); err != nil { return nil, fmt.Errorf("opening the db dir: %w", err) } @@ -501,7 +501,7 @@ func OpenDBReadOnly(dir, sandboxDirRoot string, l log.Logger) (*DBReadOnly, erro } if l == nil { - l = log.NewNopLogger() + l = promslog.NewNopLogger() } return &DBReadOnly{ @@ -700,7 +700,7 @@ func (db *DBReadOnly) Blocks() ([]BlockReader, error) { if len(corrupted) > 0 { for _, b := range loadable { if err := b.Close(); err != nil { - level.Warn(db.logger).Log("msg", "Closing block failed", "err", err, "block", b) + db.logger.Warn("Closing block failed", "err", err, "block", b) } } errs := tsdb_errors.NewMulti() @@ -732,7 +732,7 @@ func (db *DBReadOnly) Blocks() ([]BlockReader, error) { blockMetas = append(blockMetas, b.Meta()) } if overlaps := OverlappingBlocks(blockMetas); len(overlaps) > 0 { - level.Warn(db.logger).Log("msg", "Overlapping blocks found during opening", "detail", overlaps.String()) + db.logger.Warn("Overlapping blocks found during opening", "detail", overlaps.String()) } // Close all previously open readers and add the new ones to the cache. @@ -810,7 +810,7 @@ func (db *DBReadOnly) Close() error { defer func() { // Delete the temporary sandbox directory that was created when opening the DB. if err := os.RemoveAll(db.sandboxDir); err != nil { - level.Error(db.logger).Log("msg", "delete sandbox dir", "err", err) + db.logger.Error("delete sandbox dir", "err", err) } }() select { @@ -824,7 +824,7 @@ func (db *DBReadOnly) Close() error { } // Open returns a new DB in the given directory. If options are empty, DefaultOptions will be used. -func Open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, stats *DBStats) (db *DB, err error) { +func Open(dir string, l *slog.Logger, r prometheus.Registerer, opts *Options, stats *DBStats) (db *DB, err error) { var rngs []int64 opts, rngs = validateOpts(opts, nil) @@ -877,12 +877,12 @@ func validateOpts(opts *Options, rngs []int64) (*Options, []int64) { // open returns a new DB in the given directory. // It initializes the lockfile, WAL, compactor, and Head (by replaying the WAL), and runs the database. // It is not safe to open more than one DB in the same directory. 
-func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs []int64, stats *DBStats) (_ *DB, returnedErr error) { +func open(dir string, l *slog.Logger, r prometheus.Registerer, opts *Options, rngs []int64, stats *DBStats) (_ *DB, returnedErr error) { if err := os.MkdirAll(dir, 0o777); err != nil { return nil, err } if l == nil { - l = log.NewNopLogger() + l = promslog.NewNopLogger() } if stats == nil { stats = NewDBStats() @@ -1067,17 +1067,17 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs db.head.metrics.walCorruptionsTotal.Inc() var e *errLoadWbl if errors.As(initErr, &e) { - level.Warn(db.logger).Log("msg", "Encountered WBL read error, attempting repair", "err", initErr) + db.logger.Warn("Encountered WBL read error, attempting repair", "err", initErr) if err := wbl.Repair(e.err); err != nil { return nil, fmt.Errorf("repair corrupted WBL: %w", err) } - level.Info(db.logger).Log("msg", "Successfully repaired WBL") + db.logger.Info("Successfully repaired WBL") } else { - level.Warn(db.logger).Log("msg", "Encountered WAL read error, attempting repair", "err", initErr) + db.logger.Warn("Encountered WAL read error, attempting repair", "err", initErr) if err := wal.Repair(initErr); err != nil { return nil, fmt.Errorf("repair corrupted WAL: %w", err) } - level.Info(db.logger).Log("msg", "Successfully repaired WAL") + db.logger.Info("Successfully repaired WAL") } } @@ -1095,7 +1095,7 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs return db, nil } -func removeBestEffortTmpDirs(l log.Logger, dir string) error { +func removeBestEffortTmpDirs(l *slog.Logger, dir string) error { files, err := os.ReadDir(dir) if os.IsNotExist(err) { return nil @@ -1106,10 +1106,10 @@ func removeBestEffortTmpDirs(l log.Logger, dir string) error { for _, f := range files { if isTmpDir(f) { if err := os.RemoveAll(filepath.Join(dir, f.Name())); err != nil { - level.Error(l).Log("msg", "failed to delete tmp block dir", "dir", filepath.Join(dir, f.Name()), "err", err) + l.Error("failed to delete tmp block dir", "dir", filepath.Join(dir, f.Name()), "err", err) continue } - level.Info(l).Log("msg", "Found and deleted tmp block dir", "dir", filepath.Join(dir, f.Name())) + l.Info("Found and deleted tmp block dir", "dir", filepath.Join(dir, f.Name())) } } return nil @@ -1147,7 +1147,7 @@ func (db *DB) run(ctx context.Context) { case <-time.After(1 * time.Minute): db.cmtx.Lock() if err := db.reloadBlocks(); err != nil { - level.Error(db.logger).Log("msg", "reloadBlocks", "err", err) + db.logger.Error("reloadBlocks", "err", err) } db.cmtx.Unlock() @@ -1163,7 +1163,7 @@ func (db *DB) run(ctx context.Context) { db.autoCompactMtx.Lock() if db.autoCompact { if err := db.Compact(ctx); err != nil { - level.Error(db.logger).Log("msg", "compaction failed", "err", err) + db.logger.Error("compaction failed", "err", err) backoff = exponential(backoff, 1*time.Second, 1*time.Minute) } else { backoff = 0 @@ -1377,8 +1377,8 @@ func (db *DB) Compact(ctx context.Context) (returnErr error) { compactionDuration := time.Since(start) if compactionDuration.Milliseconds() > db.head.chunkRange.Load() { - level.Warn(db.logger).Log( - "msg", "Head compaction took longer than the block time range, compactions are falling behind and won't be able to catch up", + db.logger.Warn( + "Head compaction took longer than the block time range, compactions are falling behind and won't be able to catch up", "duration", compactionDuration.String(), "block_range", 
db.head.chunkRange.Load(), ) @@ -1514,15 +1514,15 @@ func (db *DB) compactOOO(dest string, oooHead *OOOCompactionHead) (_ []ulid.ULID } if len(ulids) == 0 { - level.Info(db.logger).Log( - "msg", "compact ooo head resulted in no blocks", + db.logger.Info( + "compact ooo head resulted in no blocks", "duration", time.Since(start), ) return nil, nil } - level.Info(db.logger).Log( - "msg", "out-of-order compaction completed", + db.logger.Info( + "out-of-order compaction completed", "duration", time.Since(start), "ulids", fmt.Sprintf("%v", ulids), ) @@ -1567,7 +1567,7 @@ func (db *DB) compactBlocks() (err error) { // long enough that we end up with a HEAD block that needs to be written. // Check if that's the case and stop compactions early. if db.head.compactable() && !db.waitingForCompactionDelay() { - level.Warn(db.logger).Log("msg", "aborting block compactions to persit the head block") + db.logger.Warn("aborting block compactions to persit the head block") return nil } @@ -1663,7 +1663,7 @@ func (db *DB) reloadBlocks() (err error) { for _, b := range block.Meta().Compaction.Parents { if _, ok := corrupted[b.ULID]; ok { delete(corrupted, b.ULID) - level.Warn(db.logger).Log("msg", "Found corrupted block, but replaced by compacted one so it's safe to delete. This should not happen with atomic deletes.", "block", b.ULID) + db.logger.Warn("Found corrupted block, but replaced by compacted one so it's safe to delete. This should not happen with atomic deletes.", "block", b.ULID) } deletable[b.ULID] = nil } @@ -1725,7 +1725,7 @@ func (db *DB) reloadBlocks() (err error) { blockMetas = append(blockMetas, b.Meta()) } if overlaps := OverlappingBlocks(blockMetas); len(overlaps) > 0 { - level.Warn(db.logger).Log("msg", "Overlapping blocks found during reloadBlocks", "detail", overlaps.String()) + db.logger.Warn("Overlapping blocks found during reloadBlocks", "detail", overlaps.String()) } } @@ -1741,7 +1741,7 @@ func (db *DB) reloadBlocks() (err error) { return nil } -func openBlocks(l log.Logger, dir string, loaded []*Block, chunkPool chunkenc.Pool, cache *hashcache.SeriesHashCache, postingsCacheTTL time.Duration, postingsCacheMaxItems int, postingsCacheMaxBytes int64, postingsCacheForce bool) (blocks []*Block, corrupted map[ulid.ULID]error, err error) { +func openBlocks(l *slog.Logger, dir string, loaded []*Block, chunkPool chunkenc.Pool, cache *hashcache.SeriesHashCache, postingsCacheTTL time.Duration, postingsCacheMaxItems int, postingsCacheMaxBytes int64, postingsCacheForce bool) (blocks []*Block, corrupted map[ulid.ULID]error, err error) { bDirs, err := blockDirs(dir) if err != nil { return nil, nil, fmt.Errorf("find blocks: %w", err) @@ -1751,7 +1751,7 @@ func openBlocks(l log.Logger, dir string, loaded []*Block, chunkPool chunkenc.Po for _, bDir := range bDirs { meta, _, err := readMetaFile(bDir) if err != nil { - level.Error(l).Log("msg", "Failed to read meta.json for a block during reloadBlocks. Skipping", "dir", bDir, "err", err) + l.Error("Failed to read meta.json for a block during reloadBlocks. 
Skipping", "dir", bDir, "err", err) continue } @@ -1873,7 +1873,7 @@ func (db *DB) deleteBlocks(blocks map[ulid.ULID]*Block) error { for ulid, block := range blocks { if block != nil { if err := block.Close(); err != nil { - level.Warn(db.logger).Log("msg", "Closing block failed", "err", err, "block", ulid) + db.logger.Warn("Closing block failed", "err", err, "block", ulid) } } @@ -1894,7 +1894,7 @@ func (db *DB) deleteBlocks(blocks map[ulid.ULID]*Block) error { if err := os.RemoveAll(tmpToDelete); err != nil { return fmt.Errorf("delete obsolete block %s: %w", ulid, err) } - level.Info(db.logger).Log("msg", "Deleting obsolete block", "block", ulid) + db.logger.Info("Deleting obsolete block", "block", ulid) } return nil @@ -2062,7 +2062,7 @@ func (db *DB) DisableCompactions() { defer db.autoCompactMtx.Unlock() db.autoCompact = false - level.Info(db.logger).Log("msg", "Compactions disabled") + db.logger.Info("Compactions disabled") } // EnableCompactions enables auto compactions. @@ -2071,7 +2071,7 @@ func (db *DB) EnableCompactions() { defer db.autoCompactMtx.Unlock() db.autoCompact = true - level.Info(db.logger).Log("msg", "Compactions enabled") + db.logger.Info("Compactions enabled") } func (db *DB) generateCompactionDelay() time.Duration { @@ -2101,7 +2101,7 @@ func (db *DB) Snapshot(dir string, withHead bool) error { defer db.mtx.RUnlock() for _, b := range db.blocks { - level.Info(db.logger).Log("msg", "Snapshotting block", "block", b) + db.logger.Info("Snapshotting block", "block", b) if err := b.Snapshot(dir); err != nil { return fmt.Errorf("error snapshotting block: %s: %w", b.Dir(), err) @@ -2372,7 +2372,7 @@ func (db *DB) CleanTombstones() (err error) { for _, uid := range uids { dir := filepath.Join(db.Dir(), uid.String()) if err := os.RemoveAll(dir); err != nil { - level.Error(db.logger).Log("msg", "failed to delete block after failed `CleanTombstones`", "dir", dir, "err", err) + db.logger.Error("failed to delete block after failed `CleanTombstones`", "dir", dir, "err", err) } } if err != nil { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/encoding/encoding.go b/vendor/github.com/prometheus/prometheus/tsdb/encoding/encoding.go index 88fdd30c850..cc7d0990f6a 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/encoding/encoding.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/encoding/encoding.go @@ -20,7 +20,6 @@ import ( "hash" "hash/crc32" "math" - "unsafe" "github.com/dennwc/varint" ) @@ -75,8 +74,7 @@ func (e *Encbuf) PutVarint64(x int64) { // PutUvarintStr writes a string to the buffer prefixed by its varint length (in bytes!). func (e *Encbuf) PutUvarintStr(s string) { - b := *(*[]byte)(unsafe.Pointer(&s)) - e.PutUvarint(len(b)) + e.PutUvarint(len(s)) e.PutString(s) } @@ -201,8 +199,9 @@ func (d *Decbuf) UvarintStr() string { return string(d.UvarintBytes()) } -// UvarintBytes returns invalid values if the byte slice goes away. -// Compared to UvarintStr, it avoid allocations. +// UvarintBytes returns a pointer to internal data; +// the return value becomes invalid if the byte slice goes away. +// Compared to UvarintStr, this avoids allocations. 
func (d *Decbuf) UvarintBytes() []byte { l := d.Uvarint64() if d.E != nil { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/exemplar.go b/vendor/github.com/prometheus/prometheus/tsdb/exemplar.go index 8f39377de0c..31d461bed9e 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/exemplar.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/exemplar.go @@ -29,7 +29,7 @@ import ( ) const ( - // Indicates that there is no index entry for an exmplar. + // Indicates that there is no index entry for an exemplar. noExemplar = -1 // Estimated number of exemplars per series, for sizing the index. estimatedExemplarsPerSeries = 16 diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head.go b/vendor/github.com/prometheus/prometheus/tsdb/head.go index 9f175b446d3..324b0a60607 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head.go @@ -18,6 +18,7 @@ import ( "errors" "fmt" "io" + "log/slog" "math" "path/filepath" "runtime" @@ -25,10 +26,9 @@ import ( "sync" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/oklog/ulid" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/promslog" "go.uber.org/atomic" "github.com/prometheus/prometheus/config" @@ -83,7 +83,7 @@ type Head struct { wal, wbl *wlog.WL exemplarMetrics *ExemplarMetrics exemplars ExemplarStorage - logger log.Logger + logger *slog.Logger appendPool zeropool.Pool[[]record.RefSample] exemplarsPool zeropool.Pool[[]exemplarWithSeriesRef] histogramsPool zeropool.Pool[[]record.RefHistogramSample] @@ -157,10 +157,6 @@ type HeadOptions struct { // OutOfOrderTimeWindow is > 0 EnableOOONativeHistograms atomic.Bool - // EnableCreatedTimestampZeroIngestion enables the ingestion of the created timestamp as a synthetic zero sample. - // See: https://github.com/prometheus/proposals/blob/main/proposals/2023-06-13_created-timestamp.md - EnableCreatedTimestampZeroIngestion bool - ChunkRange int64 // ChunkDirRoot is the parent directory of the chunks directory. ChunkDirRoot string @@ -248,10 +244,10 @@ type SeriesLifecycleCallback interface { } // NewHead opens the head block in dir. 
-func NewHead(r prometheus.Registerer, l log.Logger, wal, wbl *wlog.WL, opts *HeadOptions, stats *HeadStats) (*Head, error) { +func NewHead(r prometheus.Registerer, l *slog.Logger, wal, wbl *wlog.WL, opts *HeadOptions, stats *HeadStats) (*Head, error) { var err error if l == nil { - l = log.NewNopLogger() + l = promslog.NewNopLogger() } if opts.OutOfOrderTimeWindow.Load() < 0 { @@ -597,7 +593,7 @@ func newHeadMetrics(h *Head, r prometheus.Registerer) *headMetrics { }, func() float64 { val, err := h.chunkDiskMapper.Size() if err != nil { - level.Error(h.logger).Log("msg", "Failed to calculate size of \"chunks_head\" dir", + h.logger.Error("Failed to calculate size of \"chunks_head\" dir", "err", err.Error()) } return float64(val) @@ -660,7 +656,7 @@ func (h *Head) Init(minValidTime int64) error { } }() - level.Info(h.logger).Log("msg", "Replaying on-disk memory mappable chunks if any") + h.logger.Info("Replaying on-disk memory mappable chunks if any") start := time.Now() snapIdx, snapOffset := -1, 0 @@ -669,7 +665,7 @@ func (h *Head) Init(minValidTime int64) error { snapshotLoaded := false var chunkSnapshotLoadDuration time.Duration if h.opts.EnableMemorySnapshotOnShutdown { - level.Info(h.logger).Log("msg", "Chunk snapshot is enabled, replaying from the snapshot") + h.logger.Info("Chunk snapshot is enabled, replaying from the snapshot") // If there are any WAL files, there should be at least one WAL file with an index that is current or newer // than the snapshot index. If the WAL index is behind the snapshot index somehow, the snapshot is assumed // to be outdated. @@ -682,14 +678,14 @@ func (h *Head) Init(minValidTime int64) error { _, idx, _, err := LastChunkSnapshot(h.opts.ChunkDirRoot) if err != nil && !errors.Is(err, record.ErrNotFound) { - level.Error(h.logger).Log("msg", "Could not find last snapshot", "err", err) + h.logger.Error("Could not find last snapshot", "err", err) } if err == nil && endAt < idx { loadSnapshot = false - level.Warn(h.logger).Log("msg", "Last WAL file is behind snapshot, removing snapshots") + h.logger.Warn("Last WAL file is behind snapshot, removing snapshots") if err := DeleteChunkSnapshots(h.opts.ChunkDirRoot, math.MaxInt, math.MaxInt); err != nil { - level.Error(h.logger).Log("msg", "Error while deleting snapshot directories", "err", err) + h.logger.Error("Error while deleting snapshot directories", "err", err) } } } @@ -699,14 +695,14 @@ func (h *Head) Init(minValidTime int64) error { if err == nil { snapshotLoaded = true chunkSnapshotLoadDuration = time.Since(start) - level.Info(h.logger).Log("msg", "Chunk snapshot loading time", "duration", chunkSnapshotLoadDuration.String()) + h.logger.Info("Chunk snapshot loading time", "duration", chunkSnapshotLoadDuration.String()) } if err != nil { snapIdx, snapOffset = -1, 0 refSeries = make(map[chunks.HeadSeriesRef]*memSeries) h.metrics.snapshotReplayErrorTotal.Inc() - level.Error(h.logger).Log("msg", "Failed to load chunk snapshot", "err", err) + h.logger.Error("Failed to load chunk snapshot", "err", err) // We clear the partially loaded data to replay fresh from the WAL. if err := h.resetInMemoryState(); err != nil { return err @@ -730,7 +726,7 @@ func (h *Head) Init(minValidTime int64) error { mmappedChunks, oooMmappedChunks, lastMmapRef, err = h.loadMmappedChunks(refSeries) if err != nil { // TODO(codesome): clear out all m-map chunks here for refSeries. 
- level.Error(h.logger).Log("msg", "Loading on-disk chunks failed", "err", err) + h.logger.Error("Loading on-disk chunks failed", "err", err) var cerr *chunks.CorruptionErr if errors.As(err, &cerr) { h.metrics.mmapChunkCorruptionTotal.Inc() @@ -747,15 +743,15 @@ func (h *Head) Init(minValidTime int64) error { } } mmapChunkReplayDuration = time.Since(mmapChunkReplayStart) - level.Info(h.logger).Log("msg", "On-disk memory mappable chunks replay completed", "duration", mmapChunkReplayDuration.String()) + h.logger.Info("On-disk memory mappable chunks replay completed", "duration", mmapChunkReplayDuration.String()) } if h.wal == nil { - level.Info(h.logger).Log("msg", "WAL not found") + h.logger.Info("WAL not found") return nil } - level.Info(h.logger).Log("msg", "Replaying WAL, this may take a while") + h.logger.Info("Replaying WAL, this may take a while") checkpointReplayStart := time.Now() // Backfill the checkpoint first if it exists. @@ -781,7 +777,7 @@ func (h *Head) Init(minValidTime int64) error { } defer func() { if err := sr.Close(); err != nil { - level.Warn(h.logger).Log("msg", "Error while closing the wal segments reader", "err", err) + h.logger.Warn("Error while closing the wal segments reader", "err", err) } }() @@ -792,7 +788,7 @@ func (h *Head) Init(minValidTime int64) error { } h.updateWALReplayStatusRead(startFrom) startFrom++ - level.Info(h.logger).Log("msg", "WAL checkpoint loaded") + h.logger.Info("WAL checkpoint loaded") } checkpointReplayDuration := time.Since(checkpointReplayStart) @@ -822,12 +818,12 @@ func (h *Head) Init(minValidTime int64) error { } err = h.loadWAL(wlog.NewReader(sr), syms, multiRef, mmappedChunks, oooMmappedChunks) if err := sr.Close(); err != nil { - level.Warn(h.logger).Log("msg", "Error while closing the wal segments reader", "err", err) + h.logger.Warn("Error while closing the wal segments reader", "err", err) } if err != nil { return err } - level.Info(h.logger).Log("msg", "WAL segment loaded", "segment", i, "maxSegment", endAt) + h.logger.Info("WAL segment loaded", "segment", i, "maxSegment", endAt) h.updateWALReplayStatusRead(i) } walReplayDuration := time.Since(walReplayStart) @@ -850,12 +846,12 @@ func (h *Head) Init(minValidTime int64) error { sr := wlog.NewSegmentBufReader(s) err = h.loadWBL(wlog.NewReader(sr), syms, multiRef, lastMmapRef) if err := sr.Close(); err != nil { - level.Warn(h.logger).Log("msg", "Error while closing the wbl segments reader", "err", err) + h.logger.Warn("Error while closing the wbl segments reader", "err", err) } if err != nil { return &errLoadWbl{err} } - level.Info(h.logger).Log("msg", "WBL segment loaded", "segment", i, "maxSegment", endAt) + h.logger.Info("WBL segment loaded", "segment", i, "maxSegment", endAt) h.updateWALReplayStatusRead(i) } } @@ -864,8 +860,8 @@ func (h *Head) Init(minValidTime int64) error { totalReplayDuration := time.Since(start) h.metrics.dataTotalReplayDuration.Set(totalReplayDuration.Seconds()) - level.Info(h.logger).Log( - "msg", "WAL replay completed", + h.logger.Info( + "WAL replay completed", "checkpoint_replay_duration", checkpointReplayDuration.String(), "wal_replay_duration", walReplayDuration.String(), "wbl_replay_duration", wblReplayDuration.String(), @@ -975,28 +971,28 @@ func (h *Head) loadMmappedChunks(refSeries map[chunks.HeadSeriesRef]*memSeries) // removeCorruptedMmappedChunks attempts to delete the corrupted mmapped chunks and if it fails, it clears all the previously // loaded mmapped chunks. 
func (h *Head) removeCorruptedMmappedChunks(err error) (map[chunks.HeadSeriesRef][]*mmappedChunk, map[chunks.HeadSeriesRef][]*mmappedChunk, chunks.ChunkDiskMapperRef, error) { - level.Info(h.logger).Log("msg", "Deleting mmapped chunk files") + h.logger.Info("Deleting mmapped chunk files") // We never want to preserve the in-memory series from snapshots if we are repairing m-map chunks. if err := h.resetInMemoryState(); err != nil { return map[chunks.HeadSeriesRef][]*mmappedChunk{}, map[chunks.HeadSeriesRef][]*mmappedChunk{}, 0, err } - level.Info(h.logger).Log("msg", "Deleting mmapped chunk files") + h.logger.Info("Deleting mmapped chunk files") if err := h.chunkDiskMapper.DeleteCorrupted(err); err != nil { - level.Info(h.logger).Log("msg", "Deletion of corrupted mmap chunk files failed, discarding chunk files completely", "err", err) + h.logger.Info("Deletion of corrupted mmap chunk files failed, discarding chunk files completely", "err", err) if err := h.chunkDiskMapper.Truncate(math.MaxUint32); err != nil { - level.Error(h.logger).Log("msg", "Deletion of all mmap chunk files failed", "err", err) + h.logger.Error("Deletion of all mmap chunk files failed", "err", err) } return map[chunks.HeadSeriesRef][]*mmappedChunk{}, map[chunks.HeadSeriesRef][]*mmappedChunk{}, 0, nil } - level.Info(h.logger).Log("msg", "Deletion of mmap chunk files successful, reattempting m-mapping the on-disk chunks") + h.logger.Info("Deletion of mmap chunk files successful, reattempting m-mapping the on-disk chunks") mmappedChunks, oooMmappedChunks, lastRef, err := h.loadMmappedChunks(make(map[chunks.HeadSeriesRef]*memSeries)) if err != nil { - level.Error(h.logger).Log("msg", "Loading on-disk chunks failed, discarding chunk files completely", "err", err) + h.logger.Error("Loading on-disk chunks failed, discarding chunk files completely", "err", err) if err := h.chunkDiskMapper.Truncate(math.MaxUint32); err != nil { - level.Error(h.logger).Log("msg", "Deletion of all mmap chunk files failed after failed loading", "err", err) + h.logger.Error("Deletion of all mmap chunk files failed after failed loading", "err", err) } mmappedChunks = map[chunks.HeadSeriesRef][]*mmappedChunk{} } @@ -1031,7 +1027,7 @@ func (h *Head) ApplyConfig(cfg *config.Config, wbl *wlog.WL) { } migrated := h.exemplars.(*CircularExemplarStorage).Resize(newSize) - level.Info(h.logger).Log("msg", "Exemplar storage resized", "from", prevSize, "to", newSize, "migrated", migrated) + h.logger.Info("Exemplar storage resized", "from", prevSize, "to", newSize, "migrated", migrated) } // SetOutOfOrderTimeWindow updates the out of order related parameters. @@ -1342,7 +1338,7 @@ func (h *Head) truncateWAL(mint int64) error { // If truncating fails, we'll just try again at the next checkpoint. // Leftover segments will just be ignored in the future if there's a checkpoint // that supersedes them. - level.Error(h.logger).Log("msg", "truncating segments failed", "err", err) + h.logger.Error("truncating segments failed", "err", err) } // The checkpoint is written and segments before it is truncated, so we no @@ -1360,12 +1356,12 @@ func (h *Head) truncateWAL(mint int64) error { // Leftover old checkpoints do not cause problems down the line beyond // occupying disk space. // They will just be ignored since a higher checkpoint exists. 
- level.Error(h.logger).Log("msg", "delete old checkpoints", "err", err) + h.logger.Error("delete old checkpoints", "err", err) h.metrics.checkpointDeleteFail.Inc() } h.metrics.walTruncateDuration.Observe(time.Since(start).Seconds()) - level.Info(h.logger).Log("msg", "WAL checkpoint complete", + h.logger.Info("WAL checkpoint complete", "first", first, "last", last, "duration", time.Since(start)) return nil @@ -1403,7 +1399,7 @@ func (h *Head) truncateSeriesAndChunkDiskMapper(caller string) error { start := time.Now() headMaxt := h.MaxTime() actualMint, minOOOTime, minMmapFile := h.gc() - level.Info(h.logger).Log("msg", "Head GC completed", "caller", caller, "duration", time.Since(start)) + h.logger.Info("Head GC completed", "caller", caller, "duration", time.Since(start)) h.metrics.gcDuration.Observe(time.Since(start).Seconds()) if actualMint > h.minTime.Load() { @@ -1555,7 +1551,7 @@ func (h *Head) Delete(ctx context.Context, mint, maxt int64, ms ...*labels.Match series := h.series.getByID(chunks.HeadSeriesRef(p.At())) if series == nil { - level.Debug(h.logger).Log("msg", "Series not found in Head.Delete") + h.logger.Debug("Series not found in Head.Delete") continue } @@ -2156,7 +2152,7 @@ type memSeries struct { // before compaction: mmappedChunks=[p5,p6,p7,p8,p9] firstChunkID=5 // after compaction: mmappedChunks=[p7,p8,p9] firstChunkID=7 // - // pN is the pointer to the mmappedChunk referered to by HeadChunkID=N + // pN is the pointer to the mmappedChunk referred to by HeadChunkID=N mmappedChunks []*mmappedChunk // Most recent chunks in memory that are still being built or waiting to be mmapped. // This is a linked list, headChunks points to the most recent chunk, headChunks.next points diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head_append.go b/vendor/github.com/prometheus/prometheus/tsdb/head_append.go index 6a308abb14b..b545e97ae33 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head_append.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head_append.go @@ -17,11 +17,9 @@ import ( "context" "errors" "fmt" + "log/slog" "math" - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" @@ -42,6 +40,12 @@ type initAppender struct { var _ storage.GetRef = &initAppender{} +func (a *initAppender) SetOptions(opts *storage.AppendOptions) { + if a.app != nil { + a.app.SetOptions(opts) + } +} + func (a *initAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) { if a.app != nil { return a.app.Append(ref, lset, t, v) @@ -79,6 +83,16 @@ func (a *initAppender) AppendHistogram(ref storage.SeriesRef, l labels.Labels, t return a.app.AppendHistogram(ref, l, t, h, fh) } +func (a *initAppender) AppendHistogramCTZeroSample(ref storage.SeriesRef, l labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { + if a.app != nil { + return a.app.AppendHistogramCTZeroSample(ref, l, t, ct, h, fh) + } + a.head.initTime(t) + a.app = a.head.appender() + + return a.app.AppendHistogramCTZeroSample(ref, l, t, ct, h, fh) +} + func (a *initAppender) UpdateMetadata(ref storage.SeriesRef, l labels.Labels, m metadata.Metadata) (storage.SeriesRef, error) { if a.app != nil { return a.app.UpdateMetadata(ref, l, m) @@ -318,6 +332,11 @@ type headAppender struct { appendID, cleanupAppendIDsBelow uint64 closed bool + hints *storage.AppendOptions +} + 
+func (a *headAppender) SetOptions(opts *storage.AppendOptions) { + a.hints = opts } func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) { @@ -351,13 +370,18 @@ func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64 } s.Lock() + + defer s.Unlock() // TODO(codesome): If we definitely know at this point that the sample is ooo, then optimise // to skip that sample from the WAL and write only in the WBL. - _, delta, err := s.appendable(t, v, a.headMaxt, a.minValidTime, a.oooTimeWindow) + isOOO, delta, err := s.appendable(t, v, a.headMaxt, a.minValidTime, a.oooTimeWindow) if err == nil { + if isOOO && a.hints != nil && a.hints.DiscardOutOfOrder { + a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeFloat).Inc() + return 0, storage.ErrOutOfOrderSample + } s.pendingCommit = true } - s.Unlock() if delta > 0 { a.head.metrics.oooHistogram.Observe(float64(delta) / 1000) } @@ -392,7 +416,7 @@ func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64 // storage.CreatedTimestampAppender.AppendCTZeroSample for further documentation. func (a *headAppender) AppendCTZeroSample(ref storage.SeriesRef, lset labels.Labels, t, ct int64) (storage.SeriesRef, error) { if ct >= t { - return 0, fmt.Errorf("CT is newer or the same as sample's timestamp, ignoring") + return 0, storage.ErrCTNewerThanSample } s := a.head.series.getByID(chunks.HeadSeriesRef(ref)) @@ -450,9 +474,10 @@ func (a *headAppender) getOrCreate(lset labels.Labels) (s *memSeries, created bo return s, created, nil } -// appendable checks whether the given sample is valid for appending to the series. (if we return false and no error) -// The sample belongs to the out of order chunk if we return true and no error. -// An error signifies the sample cannot be handled. +// appendable checks whether the given sample is valid for appending to the series. +// If the sample is valid and in-order, it returns false with no error. +// If the sample belongs to the out-of-order chunk, it returns true with no error. +// If the sample cannot be handled, it returns an error. func (s *memSeries) appendable(t int64, v float64, headMaxt, minValidTime, oooTimeWindow int64) (isOOO bool, oooDelta int64, err error) { // Check if we can append in the in-order chunk. if t >= minValidTime { @@ -743,6 +768,102 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels return storage.SeriesRef(s.ref), nil } +func (a *headAppender) AppendHistogramCTZeroSample(ref storage.SeriesRef, lset labels.Labels, t, ct int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { + if !a.head.opts.EnableNativeHistograms.Load() { + return 0, storage.ErrNativeHistogramsDisabled + } + + if ct >= t { + return 0, storage.ErrCTNewerThanSample + } + + var created bool + s := a.head.series.getByID(chunks.HeadSeriesRef(ref)) + if s == nil { + var err error + s, created, err = a.getOrCreate(lset) + if err != nil { + return 0, err + } + } + + switch { + case h != nil: + zeroHistogram := &histogram.Histogram{} + s.Lock() + + // TODO(krajorama): reorganize Commit() to handle samples in append order + // not floats first and then histograms. Then we would not need to do this. + // This whole "if" should be removed. 
+ if created && s.lastHistogramValue == nil && s.lastFloatHistogramValue == nil { + s.lastHistogramValue = zeroHistogram + } + + // Although we call `appendableHistogram` with oooHistogramsEnabled=true, for CTZeroSamples OOO is not allowed. + // We set it to true to make this implementation as close as possible to the float implementation. + isOOO, _, err := s.appendableHistogram(ct, zeroHistogram, a.headMaxt, a.minValidTime, a.oooTimeWindow, true) + if err != nil { + s.Unlock() + if errors.Is(err, storage.ErrOutOfOrderSample) { + return 0, storage.ErrOutOfOrderCT + } + } + // OOO is not allowed because after the first scrape, CT will be the same for most (if not all) future samples. + // This is to prevent the injected zero from being marked as OOO forever. + if isOOO { + s.Unlock() + return 0, storage.ErrOutOfOrderCT + } + s.pendingCommit = true + s.Unlock() + a.histograms = append(a.histograms, record.RefHistogramSample{ + Ref: s.ref, + T: ct, + H: zeroHistogram, + }) + a.histogramSeries = append(a.histogramSeries, s) + case fh != nil: + zeroFloatHistogram := &histogram.FloatHistogram{} + s.Lock() + + // TODO(krajorama): reorganize Commit() to handle samples in append order + // not floats first and then histograms. Then we would not need to do this. + // This whole "if" should be removed. + if created && s.lastHistogramValue == nil && s.lastFloatHistogramValue == nil { + s.lastFloatHistogramValue = zeroFloatHistogram + } + + // Although we call `appendableFloatHistogram` with oooHistogramsEnabled=true, for CTZeroSamples OOO is not allowed. + // We set it to true to make this implementation as close as possible to the float implementation. + isOOO, _, err := s.appendableFloatHistogram(ct, zeroFloatHistogram, a.headMaxt, a.minValidTime, a.oooTimeWindow, true) // OOO is not allowed for CTZeroSamples. + if err != nil { + s.Unlock() + if errors.Is(err, storage.ErrOutOfOrderSample) { + return 0, storage.ErrOutOfOrderCT + } + } + // OOO is not allowed because after the first scrape, CT will be the same for most (if not all) future samples. + // This is to prevent the injected zero from being marked as OOO forever. + if isOOO { + s.Unlock() + return 0, storage.ErrOutOfOrderCT + } + s.pendingCommit = true + s.Unlock() + a.floatHistograms = append(a.floatHistograms, record.RefFloatHistogramSample{ + Ref: s.ref, + T: ct, + FH: zeroFloatHistogram, + }) + a.floatHistogramSeries = append(a.floatHistogramSeries, s) + } + + if ct > a.maxt { + a.maxt = ct + } + return storage.SeriesRef(s.ref), nil +} + // UpdateMetadata for headAppender assumes the series ref already exists, and so it doesn't // use getOrCreate or make any of the lset sanity checks that Append does. func (a *headAppender) UpdateMetadata(ref storage.SeriesRef, lset labels.Labels, meta metadata.Metadata) (storage.SeriesRef, error) { @@ -863,23 +984,38 @@ func exemplarsForEncoding(es []exemplarWithSeriesRef) []record.RefExemplar { return ret } -// Commit writes to the WAL and adds the data to the Head. -// TODO(codesome): Refactor this method to reduce indentation and make it more readable. -func (a *headAppender) Commit() (err error) { - if a.closed { - return ErrAppenderClosed - } - defer func() { a.closed = true }() - - if err := a.log(); err != nil { - _ = a.Rollback() // Most likely the same error will happen again. 
- return fmt.Errorf("write to WAL: %w", err) - } - - if a.head.writeNotified != nil { - a.head.writeNotified.Notify() - } +type appenderCommitContext struct { + floatsAppended int + histogramsAppended int + // Number of samples out of order but accepted: with ooo enabled and within time window. + oooFloatsAccepted int + oooHistogramAccepted int + // Number of samples rejected due to: out of order but OOO support disabled. + floatOOORejected int + histoOOORejected int + // Number of samples rejected due to: out of order but too old (OOO support enabled, but outside time window). + floatTooOldRejected int + histoTooOldRejected int + // Number of samples rejected due to: out of bounds: with t < minValidTime (OOO support disabled). + floatOOBRejected int + histoOOBRejected int + inOrderMint int64 + inOrderMaxt int64 + oooMinT int64 + oooMaxT int64 + wblSamples []record.RefSample + wblHistograms []record.RefHistogramSample + wblFloatHistograms []record.RefFloatHistogramSample + oooMmapMarkers map[chunks.HeadSeriesRef][]chunks.ChunkDiskMapperRef + oooMmapMarkersCount int + oooRecords [][]byte + oooCapMax int64 + appendChunkOpts chunkOpts + enc record.Encoder +} +// commitExemplars adds all exemplars from headAppender to the head's exemplar storage. +func (a *headAppender) commitExemplars() { // No errors logging to WAL, so pass the exemplars along to the in memory storage. for _, e := range a.exemplars { s := a.head.series.getByID(chunks.HeadSeriesRef(e.ref)) @@ -894,128 +1030,117 @@ func (a *headAppender) Commit() (err error) { if errors.Is(err, storage.ErrOutOfOrderExemplar) { continue } - level.Debug(a.head.logger).Log("msg", "Unknown error while adding exemplar", "err", err) + a.head.logger.Debug("Unknown error while adding exemplar", "err", err) } } +} - defer a.head.metrics.activeAppenders.Dec() - defer a.head.putAppendBuffer(a.samples) - defer a.head.putSeriesBuffer(a.sampleSeries) - defer a.head.putExemplarBuffer(a.exemplars) - defer a.head.putHistogramBuffer(a.histograms) - defer a.head.putFloatHistogramBuffer(a.floatHistograms) - defer a.head.putMetadataBuffer(a.metadata) - defer a.head.iso.closeAppend(a.appendID) +func (acc *appenderCommitContext) collectOOORecords(a *headAppender) { + if a.head.wbl == nil { + // WBL is not enabled. So no need to collect. + acc.wblSamples = nil + acc.wblHistograms = nil + acc.wblFloatHistograms = nil + acc.oooMmapMarkers = nil + acc.oooMmapMarkersCount = 0 + return + } - var ( - floatsAppended = len(a.samples) - histogramsAppended = len(a.histograms) + len(a.floatHistograms) - // number of samples out of order but accepted: with ooo enabled and within time window - oooFloatsAccepted int - oooHistogramAccepted int - // number of samples rejected due to: out of order but OOO support disabled. 
- floatOOORejected int - histoOOORejected int - // number of samples rejected due to: that are out of order but too old (OOO support enabled, but outside time window) - floatTooOldRejected int - histoTooOldRejected int - // number of samples rejected due to: out of bounds: with t < minValidTime (OOO support disabled) - floatOOBRejected int - histoOOBRejected int - inOrderMint int64 = math.MaxInt64 - inOrderMaxt int64 = math.MinInt64 - oooMinT int64 = math.MaxInt64 - oooMaxT int64 = math.MinInt64 - wblSamples []record.RefSample - wblHistograms []record.RefHistogramSample - wblFloatHistograms []record.RefFloatHistogramSample - oooMmapMarkers map[chunks.HeadSeriesRef][]chunks.ChunkDiskMapperRef - oooMmapMarkersCount int - oooRecords [][]byte - oooCapMax = a.head.opts.OutOfOrderCapMax.Load() - series *memSeries - appendChunkOpts = chunkOpts{ - chunkDiskMapper: a.head.chunkDiskMapper, - chunkRange: a.head.chunkRange.Load(), - samplesPerChunk: a.head.opts.SamplesPerChunk, - } - enc record.Encoder - ) - defer func() { - for i := range oooRecords { - a.head.putBytesBuffer(oooRecords[i][:0]) - } - }() - collectOOORecords := func() { - if a.head.wbl == nil { - // WBL is not enabled. So no need to collect. - wblSamples = nil - wblHistograms = nil - wblFloatHistograms = nil - oooMmapMarkers = nil - oooMmapMarkersCount = 0 - return - } - // The m-map happens before adding a new sample. So we collect - // the m-map markers first, and then samples. - // WBL Graphically: - // WBL Before this Commit(): [old samples before this commit for chunk 1] - // WBL After this Commit(): [old samples before this commit for chunk 1][new samples in this commit for chunk 1]mmapmarker1[samples for chunk 2]mmapmarker2[samples for chunk 3] - if oooMmapMarkers != nil { - markers := make([]record.RefMmapMarker, 0, oooMmapMarkersCount) - for ref, mmapRefs := range oooMmapMarkers { - for _, mmapRef := range mmapRefs { - markers = append(markers, record.RefMmapMarker{ - Ref: ref, - MmapRef: mmapRef, - }) - } + // The m-map happens before adding a new sample. So we collect + // the m-map markers first, and then samples. 
+ // WBL Graphically: + // WBL Before this Commit(): [old samples before this commit for chunk 1] + // WBL After this Commit(): [old samples before this commit for chunk 1][new samples in this commit for chunk 1]mmapmarker1[samples for chunk 2]mmapmarker2[samples for chunk 3] + if acc.oooMmapMarkers != nil { + markers := make([]record.RefMmapMarker, 0, acc.oooMmapMarkersCount) + for ref, mmapRefs := range acc.oooMmapMarkers { + for _, mmapRef := range mmapRefs { + markers = append(markers, record.RefMmapMarker{ + Ref: ref, + MmapRef: mmapRef, + }) } - r := enc.MmapMarkers(markers, a.head.getBytesBuffer()) - oooRecords = append(oooRecords, r) } + r := acc.enc.MmapMarkers(markers, a.head.getBytesBuffer()) + acc.oooRecords = append(acc.oooRecords, r) + } - if len(wblSamples) > 0 { - r := enc.Samples(wblSamples, a.head.getBytesBuffer()) - oooRecords = append(oooRecords, r) - } - if len(wblHistograms) > 0 { - r := enc.HistogramSamples(wblHistograms, a.head.getBytesBuffer()) - oooRecords = append(oooRecords, r) - } - if len(wblFloatHistograms) > 0 { - r := enc.FloatHistogramSamples(wblFloatHistograms, a.head.getBytesBuffer()) - oooRecords = append(oooRecords, r) - } + if len(acc.wblSamples) > 0 { + r := acc.enc.Samples(acc.wblSamples, a.head.getBytesBuffer()) + acc.oooRecords = append(acc.oooRecords, r) + } + if len(acc.wblHistograms) > 0 { + r := acc.enc.HistogramSamples(acc.wblHistograms, a.head.getBytesBuffer()) + acc.oooRecords = append(acc.oooRecords, r) + } + if len(acc.wblFloatHistograms) > 0 { + r := acc.enc.FloatHistogramSamples(acc.wblFloatHistograms, a.head.getBytesBuffer()) + acc.oooRecords = append(acc.oooRecords, r) + } + + acc.wblSamples = nil + acc.wblHistograms = nil + acc.wblFloatHistograms = nil + acc.oooMmapMarkers = nil +} - wblSamples = nil - wblHistograms = nil - wblFloatHistograms = nil - oooMmapMarkers = nil +// handleAppendableError processes errors encountered during sample appending and updates +// the provided counters accordingly. +// +// Parameters: +// - err: The error encountered during appending. +// - appended: Pointer to the counter tracking the number of successfully appended samples. +// - oooRejected: Pointer to the counter tracking the number of out-of-order samples rejected. +// - oobRejected: Pointer to the counter tracking the number of out-of-bounds samples rejected. +// - tooOldRejected: Pointer to the counter tracking the number of too-old samples rejected. +func handleAppendableError(err error, appended, oooRejected, oobRejected, tooOldRejected *int) { + switch { + case errors.Is(err, storage.ErrOutOfOrderSample): + *appended-- + *oooRejected++ + case errors.Is(err, storage.ErrOutOfBounds): + *appended-- + *oobRejected++ + case errors.Is(err, storage.ErrTooOldSample): + *appended-- + *tooOldRejected++ + default: + *appended-- } +} + +// commitSamples processes and commits the samples in the headAppender to the series. +// It handles both in-order and out-of-order samples, updating the appenderCommitContext +// with the results of the append operations. +// +// The function iterates over the samples in the headAppender and attempts to append each sample +// to its corresponding series. It handles various error cases such as out-of-order samples, +// out-of-bounds samples, and too-old samples, updating the appenderCommitContext accordingly. +// +// For out-of-order samples, it checks if the sample can be inserted into the series and updates +// the out-of-order mmap markers if necessary. 
It also updates the write-ahead log (WBL) samples +// and the minimum and maximum timestamps for out-of-order samples. +// +// For in-order samples, it attempts to append the sample to the series and updates the minimum +// and maximum timestamps for in-order samples. +// +// The function also increments the chunk metrics if a new chunk is created and performs cleanup +// operations on the series after appending the samples. +// +// There are also specific functions to commit histograms and float histograms. +func (a *headAppender) commitSamples(acc *appenderCommitContext) { + var ok, chunkCreated bool + var series *memSeries + for i, s := range a.samples { series = a.sampleSeries[i] series.Lock() oooSample, _, err := series.appendable(s.T, s.V, a.headMaxt, a.minValidTime, a.oooTimeWindow) - switch { - case err == nil: - // Do nothing. - case errors.Is(err, storage.ErrOutOfOrderSample): - floatsAppended-- - floatOOORejected++ - case errors.Is(err, storage.ErrOutOfBounds): - floatsAppended-- - floatOOBRejected++ - case errors.Is(err, storage.ErrTooOldSample): - floatsAppended-- - floatTooOldRejected++ - default: - floatsAppended-- + if err != nil { + handleAppendableError(err, &acc.floatsAppended, &acc.floatOOORejected, &acc.floatOOBRejected, &acc.floatTooOldRejected) } - var ok, chunkCreated bool - switch { case err != nil: // Do nothing here. @@ -1023,9 +1148,9 @@ func (a *headAppender) Commit() (err error) { // Sample is OOO and OOO handling is enabled // and the delta is within the OOO tolerance. var mmapRefs []chunks.ChunkDiskMapperRef - ok, chunkCreated, mmapRefs = series.insert(s.T, s.V, nil, nil, a.head.chunkDiskMapper, oooCapMax, a.head.logger) + ok, chunkCreated, mmapRefs = series.insert(s.T, s.V, nil, nil, a.head.chunkDiskMapper, acc.oooCapMax, a.head.logger) if chunkCreated { - r, ok := oooMmapMarkers[series.ref] + r, ok := acc.oooMmapMarkers[series.ref] if !ok || r != nil { // !ok means there are no markers collected for these samples yet. So we first flush the samples // before setting this m-map marker. @@ -1033,49 +1158,49 @@ func (a *headAppender) Commit() (err error) { // r != nil means we have already m-mapped a chunk for this series in the same Commit(). // Hence, before we m-map again, we should add the samples and m-map markers // seen till now to the WBL records. - collectOOORecords() + acc.collectOOORecords(a) } - if oooMmapMarkers == nil { - oooMmapMarkers = make(map[chunks.HeadSeriesRef][]chunks.ChunkDiskMapperRef) + if acc.oooMmapMarkers == nil { + acc.oooMmapMarkers = make(map[chunks.HeadSeriesRef][]chunks.ChunkDiskMapperRef) } if len(mmapRefs) > 0 { - oooMmapMarkers[series.ref] = mmapRefs - oooMmapMarkersCount += len(mmapRefs) + acc.oooMmapMarkers[series.ref] = mmapRefs + acc.oooMmapMarkersCount += len(mmapRefs) } else { // No chunk was written to disk, so we need to set an initial marker for this series. - oooMmapMarkers[series.ref] = []chunks.ChunkDiskMapperRef{0} - oooMmapMarkersCount++ + acc.oooMmapMarkers[series.ref] = []chunks.ChunkDiskMapperRef{0} + acc.oooMmapMarkersCount++ } } if ok { - wblSamples = append(wblSamples, s) - if s.T < oooMinT { - oooMinT = s.T + acc.wblSamples = append(acc.wblSamples, s) + if s.T < acc.oooMinT { + acc.oooMinT = s.T } - if s.T > oooMaxT { - oooMaxT = s.T + if s.T > acc.oooMaxT { + acc.oooMaxT = s.T } - oooFloatsAccepted++ + acc.oooFloatsAccepted++ } else { // Sample is an exact duplicate of the last sample. 
// NOTE: We can only detect updates if they clash with a sample in the OOOHeadChunk, // not with samples in already flushed OOO chunks. // TODO(codesome): Add error reporting? It depends on addressing https://github.com/prometheus/prometheus/discussions/10305. - floatsAppended-- + acc.floatsAppended-- } default: - ok, chunkCreated = series.append(s.T, s.V, a.appendID, appendChunkOpts) + ok, chunkCreated = series.append(s.T, s.V, a.appendID, acc.appendChunkOpts) if ok { - if s.T < inOrderMint { - inOrderMint = s.T + if s.T < acc.inOrderMint { + acc.inOrderMint = s.T } - if s.T > inOrderMaxt { - inOrderMaxt = s.T + if s.T > acc.inOrderMaxt { + acc.inOrderMaxt = s.T } } else { // The sample is an exact duplicate, and should be silently dropped. - floatsAppended-- + acc.floatsAppended-- } } @@ -1088,30 +1213,22 @@ func (a *headAppender) Commit() (err error) { series.pendingCommit = false series.Unlock() } +} + +// For details on the commitHistograms function, see the commitSamples docs. +func (a *headAppender) commitHistograms(acc *appenderCommitContext) { + var ok, chunkCreated bool + var series *memSeries for i, s := range a.histograms { series = a.histogramSeries[i] series.Lock() oooSample, _, err := series.appendableHistogram(s.T, s.H, a.headMaxt, a.minValidTime, a.oooTimeWindow, a.head.opts.EnableOOONativeHistograms.Load()) - switch { - case err == nil: - // Do nothing. - case errors.Is(err, storage.ErrOutOfOrderSample): - histogramsAppended-- - histoOOORejected++ - case errors.Is(err, storage.ErrOutOfBounds): - histogramsAppended-- - histoOOBRejected++ - case errors.Is(err, storage.ErrTooOldSample): - histogramsAppended-- - histoTooOldRejected++ - default: - histogramsAppended-- + if err != nil { + handleAppendableError(err, &acc.histogramsAppended, &acc.histoOOORejected, &acc.histoOOBRejected, &acc.histoTooOldRejected) } - var ok, chunkCreated bool - switch { case err != nil: // Do nothing here. @@ -1119,9 +1236,9 @@ func (a *headAppender) Commit() (err error) { // Sample is OOO and OOO handling is enabled // and the delta is within the OOO tolerance. var mmapRefs []chunks.ChunkDiskMapperRef - ok, chunkCreated, mmapRefs = series.insert(s.T, 0, s.H, nil, a.head.chunkDiskMapper, oooCapMax, a.head.logger) + ok, chunkCreated, mmapRefs = series.insert(s.T, 0, s.H, nil, a.head.chunkDiskMapper, acc.oooCapMax, a.head.logger) if chunkCreated { - r, ok := oooMmapMarkers[series.ref] + r, ok := acc.oooMmapMarkers[series.ref] if !ok || r != nil { // !ok means there are no markers collected for these samples yet. So we first flush the samples // before setting this m-map marker. @@ -1129,49 +1246,49 @@ func (a *headAppender) Commit() (err error) { // r != 0 means we have already m-mapped a chunk for this series in the same Commit(). // Hence, before we m-map again, we should add the samples and m-map markers // seen till now to the WBL records. - collectOOORecords() + acc.collectOOORecords(a) } - if oooMmapMarkers == nil { - oooMmapMarkers = make(map[chunks.HeadSeriesRef][]chunks.ChunkDiskMapperRef) + if acc.oooMmapMarkers == nil { + acc.oooMmapMarkers = make(map[chunks.HeadSeriesRef][]chunks.ChunkDiskMapperRef) } if len(mmapRefs) > 0 { - oooMmapMarkers[series.ref] = mmapRefs - oooMmapMarkersCount += len(mmapRefs) + acc.oooMmapMarkers[series.ref] = mmapRefs + acc.oooMmapMarkersCount += len(mmapRefs) } else { // No chunk was written to disk, so we need to set an initial marker for this series. 
- oooMmapMarkers[series.ref] = []chunks.ChunkDiskMapperRef{0} - oooMmapMarkersCount++ + acc.oooMmapMarkers[series.ref] = []chunks.ChunkDiskMapperRef{0} + acc.oooMmapMarkersCount++ } } if ok { - wblHistograms = append(wblHistograms, s) - if s.T < oooMinT { - oooMinT = s.T + acc.wblHistograms = append(acc.wblHistograms, s) + if s.T < acc.oooMinT { + acc.oooMinT = s.T } - if s.T > oooMaxT { - oooMaxT = s.T + if s.T > acc.oooMaxT { + acc.oooMaxT = s.T } - oooHistogramAccepted++ + acc.oooHistogramAccepted++ } else { // Sample is an exact duplicate of the last sample. // NOTE: We can only detect updates if they clash with a sample in the OOOHeadChunk, // not with samples in already flushed OOO chunks. // TODO(codesome): Add error reporting? It depends on addressing https://github.com/prometheus/prometheus/discussions/10305. - histogramsAppended-- + acc.histogramsAppended-- } default: - ok, chunkCreated = series.appendHistogram(s.T, s.H, a.appendID, appendChunkOpts) + ok, chunkCreated = series.appendHistogram(s.T, s.H, a.appendID, acc.appendChunkOpts) if ok { - if s.T < inOrderMint { - inOrderMint = s.T + if s.T < acc.inOrderMint { + acc.inOrderMint = s.T } - if s.T > inOrderMaxt { - inOrderMaxt = s.T + if s.T > acc.inOrderMaxt { + acc.inOrderMaxt = s.T } } else { - histogramsAppended-- - histoOOORejected++ + acc.histogramsAppended-- + acc.histoOOORejected++ } } @@ -1184,30 +1301,22 @@ func (a *headAppender) Commit() (err error) { series.pendingCommit = false series.Unlock() } +} + +// For details on the commitFloatHistograms function, see the commitSamples docs. +func (a *headAppender) commitFloatHistograms(acc *appenderCommitContext) { + var ok, chunkCreated bool + var series *memSeries for i, s := range a.floatHistograms { series = a.floatHistogramSeries[i] series.Lock() oooSample, _, err := series.appendableFloatHistogram(s.T, s.FH, a.headMaxt, a.minValidTime, a.oooTimeWindow, a.head.opts.EnableOOONativeHistograms.Load()) - switch { - case err == nil: - // Do nothing. - case errors.Is(err, storage.ErrOutOfOrderSample): - histogramsAppended-- - histoOOORejected++ - case errors.Is(err, storage.ErrOutOfBounds): - histogramsAppended-- - histoOOBRejected++ - case errors.Is(err, storage.ErrTooOldSample): - histogramsAppended-- - histoTooOldRejected++ - default: - histogramsAppended-- + if err != nil { + handleAppendableError(err, &acc.histogramsAppended, &acc.histoOOORejected, &acc.histoOOBRejected, &acc.histoTooOldRejected) } - var ok, chunkCreated bool - switch { case err != nil: // Do nothing here. @@ -1215,9 +1324,9 @@ func (a *headAppender) Commit() (err error) { // Sample is OOO and OOO handling is enabled // and the delta is within the OOO tolerance. var mmapRefs []chunks.ChunkDiskMapperRef - ok, chunkCreated, mmapRefs = series.insert(s.T, 0, nil, s.FH, a.head.chunkDiskMapper, oooCapMax, a.head.logger) + ok, chunkCreated, mmapRefs = series.insert(s.T, 0, nil, s.FH, a.head.chunkDiskMapper, acc.oooCapMax, a.head.logger) if chunkCreated { - r, ok := oooMmapMarkers[series.ref] + r, ok := acc.oooMmapMarkers[series.ref] if !ok || r != nil { // !ok means there are no markers collected for these samples yet. So we first flush the samples // before setting this m-map marker. @@ -1225,49 +1334,49 @@ func (a *headAppender) Commit() (err error) { // r != 0 means we have already m-mapped a chunk for this series in the same Commit(). // Hence, before we m-map again, we should add the samples and m-map markers // seen till now to the WBL records. 
- collectOOORecords() + acc.collectOOORecords(a) } - if oooMmapMarkers == nil { - oooMmapMarkers = make(map[chunks.HeadSeriesRef][]chunks.ChunkDiskMapperRef) + if acc.oooMmapMarkers == nil { + acc.oooMmapMarkers = make(map[chunks.HeadSeriesRef][]chunks.ChunkDiskMapperRef) } if len(mmapRefs) > 0 { - oooMmapMarkers[series.ref] = mmapRefs - oooMmapMarkersCount += len(mmapRefs) + acc.oooMmapMarkers[series.ref] = mmapRefs + acc.oooMmapMarkersCount += len(mmapRefs) } else { // No chunk was written to disk, so we need to set an initial marker for this series. - oooMmapMarkers[series.ref] = []chunks.ChunkDiskMapperRef{0} - oooMmapMarkersCount++ + acc.oooMmapMarkers[series.ref] = []chunks.ChunkDiskMapperRef{0} + acc.oooMmapMarkersCount++ } } if ok { - wblFloatHistograms = append(wblFloatHistograms, s) - if s.T < oooMinT { - oooMinT = s.T + acc.wblFloatHistograms = append(acc.wblFloatHistograms, s) + if s.T < acc.oooMinT { + acc.oooMinT = s.T } - if s.T > oooMaxT { - oooMaxT = s.T + if s.T > acc.oooMaxT { + acc.oooMaxT = s.T } - oooHistogramAccepted++ + acc.oooHistogramAccepted++ } else { // Sample is an exact duplicate of the last sample. // NOTE: We can only detect updates if they clash with a sample in the OOOHeadChunk, // not with samples in already flushed OOO chunks. // TODO(codesome): Add error reporting? It depends on addressing https://github.com/prometheus/prometheus/discussions/10305. - histogramsAppended-- + acc.histogramsAppended-- } default: - ok, chunkCreated = series.appendFloatHistogram(s.T, s.FH, a.appendID, appendChunkOpts) + ok, chunkCreated = series.appendFloatHistogram(s.T, s.FH, a.appendID, acc.appendChunkOpts) if ok { - if s.T < inOrderMint { - inOrderMint = s.T + if s.T < acc.inOrderMint { + acc.inOrderMint = s.T } - if s.T > inOrderMaxt { - inOrderMaxt = s.T + if s.T > acc.inOrderMaxt { + acc.inOrderMaxt = s.T } } else { - histogramsAppended-- - histoOOORejected++ + acc.histogramsAppended-- + acc.histoOOORejected++ } } @@ -1280,40 +1389,102 @@ func (a *headAppender) Commit() (err error) { series.pendingCommit = false series.Unlock() } +} +// commitMetadata commits the metadata for each series in the headAppender. +// It iterates over the metadata slice and updates the corresponding series +// with the new metadata information. The series is locked during the update +// to ensure thread safety. 
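The commit helpers introduced above funnel every append error through one classification switch keyed on the storage package's exported sentinel errors, instead of repeating that switch for floats, histograms, and float histograms. A minimal, self-contained sketch of that pattern, with local counters standing in for the appenderCommitContext fields (the names here are illustrative, not the vendored code):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/prometheus/prometheus/storage"
)

// classify mirrors the shape of handleAppendableError: every error costs one
// "appended" sample and bumps at most one rejection counter.
func classify(err error, appended, oooRejected, oobRejected, tooOldRejected *int) {
	switch {
	case errors.Is(err, storage.ErrOutOfOrderSample):
		*appended--
		*oooRejected++
	case errors.Is(err, storage.ErrOutOfBounds):
		*appended--
		*oobRejected++
	case errors.Is(err, storage.ErrTooOldSample):
		*appended--
		*tooOldRejected++
	default:
		*appended--
	}
}

func main() {
	appended, ooo, oob, tooOld := 3, 0, 0, 0 // pretend three samples were staged
	classify(storage.ErrOutOfOrderSample, &appended, &ooo, &oob, &tooOld)
	classify(storage.ErrTooOldSample, &appended, &ooo, &oob, &tooOld)
	fmt.Println(appended, ooo, oob, tooOld) // 1 1 0 1
}
```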
+func (a *headAppender) commitMetadata() { + var series *memSeries for i, m := range a.metadata { series = a.metadataSeries[i] series.Lock() series.meta = &metadata.Metadata{Type: record.ToMetricType(m.Type), Unit: m.Unit, Help: m.Help} series.Unlock() } +} - a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(floatOOORejected)) - a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeHistogram).Add(float64(histoOOORejected)) - a.head.metrics.outOfBoundSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(floatOOBRejected)) - a.head.metrics.tooOldSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(floatTooOldRejected)) - a.head.metrics.samplesAppended.WithLabelValues(sampleMetricTypeFloat).Add(float64(floatsAppended)) - a.head.metrics.samplesAppended.WithLabelValues(sampleMetricTypeHistogram).Add(float64(histogramsAppended)) - a.head.metrics.outOfOrderSamplesAppended.WithLabelValues(sampleMetricTypeFloat).Add(float64(oooFloatsAccepted)) - a.head.metrics.outOfOrderSamplesAppended.WithLabelValues(sampleMetricTypeHistogram).Add(float64(oooHistogramAccepted)) - a.head.updateMinMaxTime(inOrderMint, inOrderMaxt) - a.head.updateMinOOOMaxOOOTime(oooMinT, oooMaxT) +// Commit writes to the WAL and adds the data to the Head. +// TODO(codesome): Refactor this method to reduce indentation and make it more readable. +func (a *headAppender) Commit() (err error) { + if a.closed { + return ErrAppenderClosed + } + defer func() { a.closed = true }() + + if err := a.log(); err != nil { + _ = a.Rollback() // Most likely the same error will happen again. + return fmt.Errorf("write to WAL: %w", err) + } + + if a.head.writeNotified != nil { + a.head.writeNotified.Notify() + } + + a.commitExemplars() + + defer a.head.metrics.activeAppenders.Dec() + defer a.head.putAppendBuffer(a.samples) + defer a.head.putSeriesBuffer(a.sampleSeries) + defer a.head.putExemplarBuffer(a.exemplars) + defer a.head.putHistogramBuffer(a.histograms) + defer a.head.putFloatHistogramBuffer(a.floatHistograms) + defer a.head.putMetadataBuffer(a.metadata) + defer a.head.iso.closeAppend(a.appendID) + + acc := &appenderCommitContext{ + floatsAppended: len(a.samples), + histogramsAppended: len(a.histograms) + len(a.floatHistograms), + inOrderMint: math.MaxInt64, + inOrderMaxt: math.MinInt64, + oooMinT: math.MaxInt64, + oooMaxT: math.MinInt64, + oooCapMax: a.head.opts.OutOfOrderCapMax.Load(), + appendChunkOpts: chunkOpts{ + chunkDiskMapper: a.head.chunkDiskMapper, + chunkRange: a.head.chunkRange.Load(), + samplesPerChunk: a.head.opts.SamplesPerChunk, + }, + } + + defer func() { + for i := range acc.oooRecords { + a.head.putBytesBuffer(acc.oooRecords[i][:0]) + } + }() - collectOOORecords() + a.commitSamples(acc) + a.commitHistograms(acc) + a.commitFloatHistograms(acc) + a.commitMetadata() + + a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.floatOOORejected)) + a.head.metrics.outOfOrderSamples.WithLabelValues(sampleMetricTypeHistogram).Add(float64(acc.histoOOORejected)) + a.head.metrics.outOfBoundSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.floatOOBRejected)) + a.head.metrics.tooOldSamples.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.floatTooOldRejected)) + a.head.metrics.samplesAppended.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.floatsAppended)) + a.head.metrics.samplesAppended.WithLabelValues(sampleMetricTypeHistogram).Add(float64(acc.histogramsAppended)) + 
a.head.metrics.outOfOrderSamplesAppended.WithLabelValues(sampleMetricTypeFloat).Add(float64(acc.oooFloatsAccepted)) + a.head.metrics.outOfOrderSamplesAppended.WithLabelValues(sampleMetricTypeHistogram).Add(float64(acc.oooHistogramAccepted)) + a.head.updateMinMaxTime(acc.inOrderMint, acc.inOrderMaxt) + a.head.updateMinOOOMaxOOOTime(acc.oooMinT, acc.oooMaxT) + + acc.collectOOORecords(a) if a.head.wbl != nil { - if err := a.head.wbl.Log(oooRecords...); err != nil { + if err := a.head.wbl.Log(acc.oooRecords...); err != nil { // TODO(codesome): Currently WBL logging of ooo samples is best effort here since we cannot try logging // until we have found what samples become OOO. We can try having a metric for this failure. // Returning the error here is not correct because we have already put the samples into the memory, // hence the append/insert was a success. - level.Error(a.head.logger).Log("msg", "Failed to log out of order samples into the WAL", "err", err) + a.head.logger.Error("Failed to log out of order samples into the WAL", "err", err) } } return nil } // insert is like append, except it inserts. Used for OOO samples. -func (s *memSeries) insert(t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, chunkDiskMapper *chunks.ChunkDiskMapper, oooCapMax int64, logger log.Logger) (inserted, chunkCreated bool, mmapRefs []chunks.ChunkDiskMapperRef) { +func (s *memSeries) insert(t int64, v float64, h *histogram.Histogram, fh *histogram.FloatHistogram, chunkDiskMapper *chunks.ChunkDiskMapper, oooCapMax int64, logger *slog.Logger) (inserted, chunkCreated bool, mmapRefs []chunks.ChunkDiskMapperRef) { if s.ooo == nil { s.ooo = &memSeriesOOOFields{} } @@ -1702,7 +1873,7 @@ func (s *memSeries) cutNewHeadChunk(mint int64, e chunkenc.Encoding, chunkRange // cutNewOOOHeadChunk cuts a new OOO chunk and m-maps the old chunk. // The caller must ensure that s is locked and s.ooo is not nil. -func (s *memSeries) cutNewOOOHeadChunk(mint int64, chunkDiskMapper *chunks.ChunkDiskMapper, logger log.Logger) (*oooHeadChunk, []chunks.ChunkDiskMapperRef) { +func (s *memSeries) cutNewOOOHeadChunk(mint int64, chunkDiskMapper *chunks.ChunkDiskMapper, logger *slog.Logger) (*oooHeadChunk, []chunks.ChunkDiskMapperRef) { ref := s.mmapCurrentOOOHeadChunk(chunkDiskMapper, logger) s.ooo.oooHeadChunk = &oooHeadChunk{ @@ -1715,7 +1886,7 @@ func (s *memSeries) cutNewOOOHeadChunk(mint int64, chunkDiskMapper *chunks.Chunk } // s must be locked when calling. -func (s *memSeries) mmapCurrentOOOHeadChunk(chunkDiskMapper *chunks.ChunkDiskMapper, logger log.Logger) []chunks.ChunkDiskMapperRef { +func (s *memSeries) mmapCurrentOOOHeadChunk(chunkDiskMapper *chunks.ChunkDiskMapper, logger *slog.Logger) []chunks.ChunkDiskMapperRef { if s.ooo == nil || s.ooo.oooHeadChunk == nil { // OOO is not enabled or there is no head chunk, so nothing to m-map here. 
return nil @@ -1728,7 +1899,7 @@ func (s *memSeries) mmapCurrentOOOHeadChunk(chunkDiskMapper *chunks.ChunkDiskMap chunkRefs := make([]chunks.ChunkDiskMapperRef, 0, len(chks)) for _, memchunk := range chks { if len(s.ooo.oooMmappedChunks) >= (oooChunkIDMask - 1) { - level.Error(logger).Log("msg", "Too many OOO chunks, dropping data", "series", s.lset.String()) + logger.Error("Too many OOO chunks, dropping data", "series", s.lset.String()) break } chunkRef := chunkDiskMapper.WriteChunk(s.ref, memchunk.minTime, memchunk.maxTime, memchunk.chunk, true, handleChunkWriteError) diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head_dedupelabels.go b/vendor/github.com/prometheus/prometheus/tsdb/head_dedupelabels.go index a16d9072612..a75f3372245 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head_dedupelabels.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head_dedupelabels.go @@ -16,8 +16,7 @@ package tsdb import ( - "github.com/go-kit/log" - "github.com/go-kit/log/level" + "log/slog" "github.com/prometheus/prometheus/model/labels" ) @@ -31,8 +30,8 @@ func (s *memSeries) labels() labels.Labels { // RebuildSymbolTable goes through all the series in h, build a SymbolTable with all names and values, // replace each series' Labels with one using that SymbolTable. -func (h *Head) RebuildSymbolTable(logger log.Logger) *labels.SymbolTable { - level.Info(logger).Log("msg", "RebuildSymbolTable starting") +func (h *Head) RebuildSymbolTable(logger *slog.Logger) *labels.SymbolTable { + logger.Info("RebuildSymbolTable starting") st := labels.NewSymbolTable() builder := labels.NewScratchBuilderWithSymbolTable(st, 0) rebuildLabels := func(lbls labels.Labels) labels.Labels { @@ -66,7 +65,7 @@ func (h *Head) RebuildSymbolTable(logger log.Logger) *labels.SymbolTable { if e, ok := h.exemplars.(withReset); ok { e.ResetSymbolTable(st) } - level.Info(logger).Log("msg", "RebuildSymbolTable finished", "size", st.Len()) + logger.Info("RebuildSymbolTable finished", "size", st.Len()) return st } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head_other.go b/vendor/github.com/prometheus/prometheus/tsdb/head_other.go index fea91530dc7..c73872c12e1 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head_other.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head_other.go @@ -16,7 +16,7 @@ package tsdb import ( - "github.com/go-kit/log" + "log/slog" "github.com/prometheus/prometheus/model/labels" ) @@ -27,6 +27,6 @@ func (s *memSeries) labels() labels.Labels { } // RebuildSymbolTable is a no-op when not using dedupelabels. 
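Throughout this diff the vendored Prometheus code moves from github.com/go-kit/log to the standard library's log/slog (see the RebuildSymbolTable signature change just below), so call sites drop the level.X(logger).Log("msg", ...) indirection. A small sketch of what emitting through, and constructing, such loggers looks like for downstream code; the text handler is only an illustration, any slog.Handler works:

```go
package main

import (
	"log/slog"
	"os"

	"github.com/prometheus/common/promslog"
)

func main() {
	// Before: level.Info(logger).Log("msg", "RebuildSymbolTable finished", "size", n)
	// After:  logger.Info("RebuildSymbolTable finished", "size", n)
	logger := slog.New(slog.NewTextHandler(os.Stderr, nil))
	logger.Info("RebuildSymbolTable finished", "size", 42)

	// Where a no-op logger used to come from go-kit's log.NewNopLogger(),
	// the vendored code now uses promslog.NewNopLogger(), as seen later in
	// this diff for wlog and the dir-locker test utilities.
	nop := promslog.NewNopLogger()
	nop.Debug("this is discarded")
}
```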
-func (h *Head) RebuildSymbolTable(logger log.Logger) *labels.SymbolTable { +func (h *Head) RebuildSymbolTable(logger *slog.Logger) *labels.SymbolTable { return nil } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head_read.go b/vendor/github.com/prometheus/prometheus/tsdb/head_read.go index f85120a97f3..f6215c52061 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head_read.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head_read.go @@ -21,8 +21,6 @@ import ( "slices" "sync" - "github.com/go-kit/log/level" - "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" @@ -136,7 +134,7 @@ func (h *headIndexReader) SortedPostings(p index.Postings) index.Postings { for p.Next() { s := h.head.series.getByID(chunks.HeadSeriesRef(p.At())) if s == nil { - level.Debug(h.head.logger).Log("msg", "Looked up series not found") + h.head.logger.Debug("Looked up series not found") } else { series = append(series, s) } @@ -169,7 +167,7 @@ func (h *headIndexReader) ShardedPostings(p index.Postings, shardIndex, shardCou for p.Next() { s := h.head.series.getByID(chunks.HeadSeriesRef(p.At())) if s == nil { - level.Debug(h.head.logger).Log("msg", "Looked up series not found") + h.head.logger.Debug("Looked up series not found") continue } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go b/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go index 674d7080123..6729d770901 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go @@ -24,7 +24,6 @@ import ( "sync" "time" - "github.com/go-kit/log/level" "go.uber.org/atomic" "github.com/prometheus/prometheus/model/exemplar" @@ -128,7 +127,7 @@ func (h *Head) loadWAL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch // replaying the WAL, so lets just log the error if it's not that type. 
err = h.exemplars.AddExemplar(ms.labels(), exemplar.Exemplar{Ts: e.T, Value: e.V, Labels: e.Labels}) if err != nil && errors.Is(err, storage.ErrOutOfOrderExemplar) { - level.Warn(h.logger).Log("msg", "Unexpected error when replaying WAL on exemplar record", "err", err) + h.logger.Warn("Unexpected error when replaying WAL on exemplar record", "err", err) } } }(exemplarsInput) @@ -421,8 +420,8 @@ Outer: } if unknownRefs.Load()+unknownExemplarRefs.Load()+unknownHistogramRefs.Load()+unknownMetadataRefs.Load() > 0 { - level.Warn(h.logger).Log( - "msg", "Unknown series references", + h.logger.Warn( + "Unknown series references", "samples", unknownRefs.Load(), "exemplars", unknownExemplarRefs.Load(), "histograms", unknownHistogramRefs.Load(), @@ -430,7 +429,7 @@ Outer: ) } if count := mmapOverlappingChunks.Load(); count > 0 { - level.Info(h.logger).Log("msg", "Overlapping m-map chunks on duplicate series records", "count", count) + h.logger.Info("Overlapping m-map chunks on duplicate series records", "count", count) } return nil } @@ -446,8 +445,8 @@ func (h *Head) resetSeriesWithMMappedChunks(mSeries *memSeries, mmc, oooMmc []*m mmc[0].minTime, mmc[len(mmc)-1].maxTime, ) { - level.Debug(h.logger).Log( - "msg", "M-mapped chunks overlap on a duplicate series record", + h.logger.Debug( + "M-mapped chunks overlap on a duplicate series record", "series", mSeries.labels().String(), "oldref", mSeries.ref, "oldmint", mSeries.mmappedChunks[0].minTime, @@ -913,7 +912,7 @@ func (h *Head) loadWBL(r *wlog.Reader, syms *labels.SymbolTable, multiRef map[ch } if unknownRefs.Load() > 0 || mmapMarkerUnknownRefs.Load() > 0 { - level.Warn(h.logger).Log("msg", "Unknown series references for ooo WAL replay", "samples", unknownRefs.Load(), "mmap_markers", mmapMarkerUnknownRefs.Load()) + h.logger.Warn("Unknown series references for ooo WAL replay", "samples", unknownRefs.Load(), "mmap_markers", mmapMarkerUnknownRefs.Load()) } return nil } @@ -1214,7 +1213,7 @@ const chunkSnapshotPrefix = "chunk_snapshot." func (h *Head) ChunkSnapshot() (*ChunkSnapshotStats, error) { if h.wal == nil { // If we are not storing any WAL, does not make sense to take a snapshot too. - level.Warn(h.logger).Log("msg", "skipping chunk snapshotting as WAL is disabled") + h.logger.Warn("skipping chunk snapshotting as WAL is disabled") return &ChunkSnapshotStats{}, nil } h.chunkSnapshotMtx.Lock() @@ -1363,7 +1362,7 @@ func (h *Head) ChunkSnapshot() (*ChunkSnapshotStats, error) { // Leftover old chunk snapshots do not cause problems down the line beyond // occupying disk space. // They will just be ignored since a higher chunk snapshot exists. 
- level.Error(h.logger).Log("msg", "delete old chunk snapshots", "err", err) + h.logger.Error("delete old chunk snapshots", "err", err) } return stats, nil } @@ -1373,12 +1372,12 @@ func chunkSnapshotDir(wlast, woffset int) string { } func (h *Head) performChunkSnapshot() error { - level.Info(h.logger).Log("msg", "creating chunk snapshot") + h.logger.Info("creating chunk snapshot") startTime := time.Now() stats, err := h.ChunkSnapshot() elapsed := time.Since(startTime) if err == nil { - level.Info(h.logger).Log("msg", "chunk snapshot complete", "duration", elapsed.String(), "num_series", stats.TotalSeries, "dir", stats.Dir) + h.logger.Info("chunk snapshot complete", "duration", elapsed.String(), "num_series", stats.TotalSeries, "dir", stats.Dir) } if err != nil { return fmt.Errorf("chunk snapshot: %w", err) @@ -1493,7 +1492,7 @@ func (h *Head) loadChunkSnapshot() (int, int, map[chunks.HeadSeriesRef]*memSerie } defer func() { if err := sr.Close(); err != nil { - level.Warn(h.logger).Log("msg", "error while closing the wal segments reader", "err", err) + h.logger.Warn("error while closing the wal segments reader", "err", err) } }() @@ -1682,9 +1681,9 @@ Outer: } elapsed := time.Since(start) - level.Info(h.logger).Log("msg", "chunk snapshot loaded", "dir", dir, "num_series", numSeries, "duration", elapsed.String()) + h.logger.Info("chunk snapshot loaded", "dir", dir, "num_series", numSeries, "duration", elapsed.String()) if unknownRefs > 0 { - level.Warn(h.logger).Log("msg", "unknown series references during chunk snapshot replay", "count", unknownRefs) + h.logger.Warn("unknown series references during chunk snapshot replay", "count", unknownRefs) } return snapIdx, snapOffset, refSeries, nil diff --git a/vendor/github.com/prometheus/prometheus/tsdb/index/index.go b/vendor/github.com/prometheus/prometheus/tsdb/index/index.go index 9eaa0a97d2b..957106b6042 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/index/index.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/index/index.go @@ -44,10 +44,12 @@ const ( // HeaderLen represents number of bytes reserved of index for header. HeaderLen = 5 - // FormatV1 represents 1 version of index. + // FormatV1 represents version 1 of index. FormatV1 = 1 - // FormatV2 represents 2 version of index. + // FormatV2 represents version 2 of index. FormatV2 = 2 + // FormatV3 represents version 3 of index. + FormatV3 = 3 indexFilename = "index" @@ -437,7 +439,7 @@ func (w *Writer) AddSeries(ref storage.SeriesRef, lset labels.Labels, chunks ... 
return err } if labels.Compare(lset, w.lastSeries) <= 0 { - return fmt.Errorf("out-of-order series added with label set %q", lset) + return fmt.Errorf("out-of-order series added with label set %q, last label set %q", lset, w.lastSeries) } if ref < w.lastSeriesRef && !w.lastSeries.IsEmpty() { @@ -1212,7 +1214,9 @@ func newReader(b ByteSlice, c io.Closer, cacheProvider ReaderCacheProvider) (*Re } r.version = int(r.b.Range(4, 5)[0]) - if r.version != FormatV1 && r.version != FormatV2 { + switch r.version { + case FormatV1, FormatV2, FormatV3: + default: return nil, fmt.Errorf("unknown index file version %d", r.version) } @@ -1370,7 +1374,9 @@ func (s Symbols) Lookup(o uint32) (string, error) { B: s.bs.Range(0, s.bs.Len()), } - if s.version == FormatV2 { + if s.version == FormatV1 { + d.Skip(int(o)) + } else { if int(o) >= s.seen { return "", fmt.Errorf("unknown symbol offset %d", o) } @@ -1379,8 +1385,6 @@ func (s Symbols) Lookup(o uint32) (string, error) { for i := o - (o / symbolFactor * symbolFactor); i > 0; i-- { d.UvarintBytes() } - } else { - d.Skip(int(o)) } sym := d.UvarintStr() if d.Err() != nil { @@ -1426,10 +1430,10 @@ func (s Symbols) ReverseLookup(sym string) (uint32, error) { if lastSymbol != sym { return 0, fmt.Errorf("unknown symbol %q", sym) } - if s.version == FormatV2 { - return uint32(res), nil + if s.version == FormatV1 { + return uint32(s.bs.Len() - lastLen), nil } - return uint32(s.bs.Len() - lastLen), nil + return uint32(res), nil } func (s Symbols) Size() int { @@ -1588,7 +1592,7 @@ func (r *Reader) LabelNamesFor(ctx context.Context, postings Postings) ([]string offset := id // In version 2 series IDs are no longer exact references but series are 16-byte padded // and the ID is the multiple of 16 of the actual position. - if r.version == FormatV2 { + if r.version != FormatV1 { offset = id * seriesByteAlign } @@ -1627,7 +1631,7 @@ func (r *Reader) LabelValueFor(ctx context.Context, id storage.SeriesRef, label offset := id // In version 2 series IDs are no longer exact references but series are 16-byte padded // and the ID is the multiple of 16 of the actual position. - if r.version == FormatV2 { + if r.version != FormatV1 { offset = id * seriesByteAlign } d := encoding.NewDecbufUvarintAt(r.b, int(offset), castagnoliTable) @@ -1653,7 +1657,7 @@ func (r *Reader) Series(id storage.SeriesRef, builder *labels.ScratchBuilder, ch offset := id // In version 2 series IDs are no longer exact references but series are 16-byte padded // and the ID is the multiple of 16 of the actual position. - if r.version == FormatV2 { + if r.version != FormatV1 { offset = id * seriesByteAlign } d := encoding.NewDecbufUvarintAt(r.b, int(offset), castagnoliTable) @@ -2106,5 +2110,5 @@ func (dec *Decoder) Series(b []byte, builder *labels.ScratchBuilder, chks *[]chu } func yoloString(b []byte) string { - return *((*string)(unsafe.Pointer(&b))) + return unsafe.String(unsafe.SliceData(b), len(b)) } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/querier.go b/vendor/github.com/prometheus/prometheus/tsdb/querier.go index 6005ad83ce5..e506153ebaa 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/querier.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/querier.go @@ -253,6 +253,10 @@ func PostingsForMatchers(ctx context.Context, ix IndexPostingsReader, ms ...*lab return nil, err } its = append(its, allPostings) + case m.Type == labels.MatchRegexp && m.Value == ".*": + // .* regexp matches any string: do nothing. 
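The matcher case just above and the MatchNotRegexp case that follows let PostingsForMatchers short-circuit selectors written with ".*": a regexp match on ".*" constrains nothing, and a negated regexp match on ".*" can never be satisfied. A standalone sketch showing why the shortcut is safe (the label name "job" is an arbitrary example):

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	// `job=~".*"` matches every value, including the empty string, so the
	// planner can skip it instead of expanding postings for it.
	all := labels.MustNewMatcher(labels.MatchRegexp, "job", ".*")
	fmt.Println(all.Matches(""), all.Matches("api")) // true true

	// `job!~".*"` can never match anything, so index.EmptyPostings() is
	// returned straight away.
	none := labels.MustNewMatcher(labels.MatchNotRegexp, "job", ".*")
	fmt.Println(none.Matches(""), none.Matches("api")) // false false
}
```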
+ case m.Type == labels.MatchNotRegexp && m.Value == ".*": + return index.EmptyPostings(), nil case labelMustBeSet[m.Name]: // If this matcher must be non-empty, we can be smarter. matchesEmpty := m.Matches("") diff --git a/vendor/github.com/prometheus/prometheus/tsdb/repair.go b/vendor/github.com/prometheus/prometheus/tsdb/repair.go index 9d2c5738d17..8bdc645b5e3 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/repair.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/repair.go @@ -17,19 +17,17 @@ import ( "encoding/json" "fmt" "io" + "log/slog" "os" "path/filepath" - "github.com/go-kit/log" - "github.com/go-kit/log/level" - tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" "github.com/prometheus/prometheus/tsdb/fileutil" ) // repairBadIndexVersion repairs an issue in index and meta.json persistence introduced in // commit 129773b41a565fde5156301e37f9a87158030443. -func repairBadIndexVersion(logger log.Logger, dir string) error { +func repairBadIndexVersion(logger *slog.Logger, dir string) error { // All blocks written by Prometheus 2.1 with a meta.json version of 2 are affected. // We must actually set the index file version to 2 and revert the meta.json version back to 1. dirs, err := blockDirs(dir) @@ -41,7 +39,7 @@ func repairBadIndexVersion(logger log.Logger, dir string) error { defer func() { for _, tmp := range tmpFiles { if err := os.RemoveAll(tmp); err != nil { - level.Error(logger).Log("msg", "remove tmp file", "err", err.Error()) + logger.Error("remove tmp file", "err", err.Error()) } } }() @@ -49,20 +47,20 @@ func repairBadIndexVersion(logger log.Logger, dir string) error { for _, d := range dirs { meta, err := readBogusMetaFile(d) if err != nil { - level.Error(logger).Log("msg", "failed to read meta.json for a block during repair process; skipping", "dir", d, "err", err) + logger.Error("failed to read meta.json for a block during repair process; skipping", "dir", d, "err", err) continue } if meta.Version == metaVersion1 { - level.Info(logger).Log( - "msg", "Found healthy block", + logger.Info( + "Found healthy block", "mint", meta.MinTime, "maxt", meta.MaxTime, "ulid", meta.ULID, ) continue } - level.Info(logger).Log( - "msg", "Fixing broken block", + logger.Info( + "Fixing broken block", "mint", meta.MinTime, "maxt", meta.MaxTime, "ulid", meta.ULID, diff --git a/vendor/github.com/prometheus/prometheus/tsdb/tombstones/tombstones.go b/vendor/github.com/prometheus/prometheus/tsdb/tombstones/tombstones.go index 4cea5005dbc..dcba298f3bb 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/tombstones/tombstones.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/tombstones/tombstones.go @@ -19,15 +19,13 @@ import ( "fmt" "hash" "hash/crc32" + "log/slog" "math" "os" "path/filepath" "sort" "sync" - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/encoding" tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" @@ -76,7 +74,7 @@ type Reader interface { Close() error } -func WriteFile(logger log.Logger, dir string, tr Reader) (int64, error) { +func WriteFile(logger *slog.Logger, dir string, tr Reader) (int64, error) { path := filepath.Join(dir, TombstonesFilename) tmp := path + ".tmp" hash := newCRC32() @@ -89,11 +87,11 @@ func WriteFile(logger log.Logger, dir string, tr Reader) (int64, error) { defer func() { if f != nil { if err := f.Close(); err != nil { - level.Error(logger).Log("msg", "close tmp file", "err", err.Error()) + logger.Error("close tmp file", 
"err", err.Error()) } } if err := os.RemoveAll(tmp); err != nil { - level.Error(logger).Log("msg", "remove tmp file", "err", err.Error()) + logger.Error("remove tmp file", "err", err.Error()) } }() diff --git a/vendor/github.com/prometheus/prometheus/tsdb/tsdbblockutil.go b/vendor/github.com/prometheus/prometheus/tsdb/tsdbblockutil.go index f7b27c2e08e..b49757223f1 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/tsdbblockutil.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/tsdbblockutil.go @@ -16,10 +16,9 @@ package tsdb import ( "context" "fmt" + "log/slog" "path/filepath" - "github.com/go-kit/log" - "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" ) @@ -27,7 +26,7 @@ import ( var ErrInvalidTimes = fmt.Errorf("max time is lesser than min time") // CreateBlock creates a chunkrange block from the samples passed to it, and writes it to disk. -func CreateBlock(series []storage.Series, dir string, chunkRange int64, logger log.Logger) (string, error) { +func CreateBlock(series []storage.Series, dir string, chunkRange int64, logger *slog.Logger) (string, error) { if chunkRange == 0 { chunkRange = DefaultBlockDuration } @@ -41,7 +40,7 @@ func CreateBlock(series []storage.Series, dir string, chunkRange int64, logger l } defer func() { if err := w.Close(); err != nil { - logger.Log("err closing blockwriter", err.Error()) + logger.Error("err closing blockwriter", "err", err.Error()) } }() diff --git a/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/dir_locker.go b/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/dir_locker.go index fa939879cad..4b69e1f9d61 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/dir_locker.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/dir_locker.go @@ -16,11 +16,10 @@ package tsdbutil import ( "errors" "fmt" + "log/slog" "os" "path/filepath" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" @@ -34,7 +33,7 @@ const ( ) type DirLocker struct { - logger log.Logger + logger *slog.Logger createdCleanly prometheus.Gauge @@ -43,7 +42,7 @@ type DirLocker struct { } // NewDirLocker creates a DirLocker that can obtain an exclusive lock on dir. -func NewDirLocker(dir, subsystem string, l log.Logger, r prometheus.Registerer) (*DirLocker, error) { +func NewDirLocker(dir, subsystem string, l *slog.Logger, r prometheus.Registerer) (*DirLocker, error) { lock := &DirLocker{ logger: l, createdCleanly: prometheus.NewGauge(prometheus.GaugeOpts{ @@ -74,7 +73,7 @@ func (l *DirLocker) Lock() error { } if _, err := os.Stat(l.path); err == nil { - level.Warn(l.logger).Log("msg", "A lockfile from a previous execution already existed. It was replaced", "file", l.path) + l.logger.Warn("A lockfile from a previous execution already existed. 
It was replaced", "file", l.path) l.createdCleanly.Set(lockfileReplaced) } else { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/dir_locker_testutil.go b/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/dir_locker_testutil.go index a4cf5abd68c..7228dbafed6 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/dir_locker_testutil.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/dir_locker_testutil.go @@ -18,8 +18,8 @@ import ( "os" "testing" - "github.com/go-kit/log" prom_testutil "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/prometheus/common/promslog" "github.com/stretchr/testify/require" "github.com/prometheus/prometheus/util/testutil" @@ -68,7 +68,7 @@ func TestDirLockerUsage(t *testing.T, open func(t *testing.T, data string, creat // Test preconditions (file already exists + lockfile option) if c.fileAlreadyExists { - tmpLocker, err := NewDirLocker(tmpdir, "tsdb", log.NewNopLogger(), nil) + tmpLocker, err := NewDirLocker(tmpdir, "tsdb", promslog.NewNopLogger(), nil) require.NoError(t, err) err = os.WriteFile(tmpLocker.path, []byte{}, 0o644) require.NoError(t, err) diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wlog/checkpoint.go b/vendor/github.com/prometheus/prometheus/tsdb/wlog/checkpoint.go index a16cd5fc749..58e11c770e0 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/wlog/checkpoint.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/wlog/checkpoint.go @@ -18,6 +18,7 @@ import ( "errors" "fmt" "io" + "log/slog" "math" "os" "path/filepath" @@ -25,9 +26,6 @@ import ( "strconv" "strings" - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/tsdb/chunks" tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" @@ -94,11 +92,11 @@ const checkpointPrefix = "checkpoint." // segmented format as the original WAL itself. // This makes it easy to read it through the WAL package and concatenate // it with the original WAL. -func Checkpoint(logger log.Logger, w *WL, from, to int, keep func(id chunks.HeadSeriesRef) bool, mint int64) (*CheckpointStats, error) { +func Checkpoint(logger *slog.Logger, w *WL, from, to int, keep func(id chunks.HeadSeriesRef) bool, mint int64) (*CheckpointStats, error) { stats := &CheckpointStats{} var sgmReader io.ReadCloser - level.Info(logger).Log("msg", "Creating checkpoint", "from_segment", from, "to_segment", to, "mint", mint) + logger.Info("Creating checkpoint", "from_segment", from, "to_segment", to, "mint", mint) { var sgmRange []SegmentRange diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wlog/live_reader.go b/vendor/github.com/prometheus/prometheus/tsdb/wlog/live_reader.go index 6eaef5f3960..a017d362d15 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/wlog/live_reader.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/wlog/live_reader.go @@ -20,9 +20,8 @@ import ( "fmt" "hash/crc32" "io" + "log/slog" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/golang/snappy" "github.com/klauspost/compress/zstd" "github.com/prometheus/client_golang/prometheus" @@ -51,7 +50,7 @@ func NewLiveReaderMetrics(reg prometheus.Registerer) *LiveReaderMetrics { } // NewLiveReader returns a new live reader. 
-func NewLiveReader(logger log.Logger, metrics *LiveReaderMetrics, r io.Reader) *LiveReader { +func NewLiveReader(logger *slog.Logger, metrics *LiveReaderMetrics, r io.Reader) *LiveReader { // Calling zstd.NewReader with a nil io.Reader and no options cannot return an error. zstdReader, _ := zstd.NewReader(nil) @@ -73,7 +72,7 @@ func NewLiveReader(logger log.Logger, metrics *LiveReaderMetrics, r io.Reader) * // that are still in the process of being written, and returns records as soon // as they can be read. type LiveReader struct { - logger log.Logger + logger *slog.Logger rdr io.Reader err error rec []byte @@ -311,7 +310,7 @@ func (r *LiveReader) readRecord() ([]byte, int, error) { return nil, 0, fmt.Errorf("record would overflow current page: %d > %d", r.readIndex+recordHeaderSize+length, pageSize) } r.metrics.readerCorruptionErrors.WithLabelValues("record_span_page").Inc() - level.Warn(r.logger).Log("msg", "Record spans page boundaries", "start", r.readIndex, "end", recordHeaderSize+length, "pageSize", pageSize) + r.logger.Warn("Record spans page boundaries", "start", r.readIndex, "end", recordHeaderSize+length, "pageSize", pageSize) } if recordHeaderSize+length > pageSize { return nil, 0, fmt.Errorf("record length greater than a single page: %d > %d", recordHeaderSize+length, pageSize) diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wlog/watcher.go b/vendor/github.com/prometheus/prometheus/tsdb/wlog/watcher.go index ac5041e87b9..d68ef2accb8 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/wlog/watcher.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/wlog/watcher.go @@ -17,6 +17,7 @@ import ( "errors" "fmt" "io" + "log/slog" "math" "os" "path/filepath" @@ -24,9 +25,8 @@ import ( "strings" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/timestamp" @@ -84,7 +84,7 @@ type WatcherMetrics struct { type Watcher struct { name string writer WriteTo - logger log.Logger + logger *slog.Logger walDir string lastCheckpoint string sendExemplars bool @@ -172,9 +172,9 @@ func NewWatcherMetrics(reg prometheus.Registerer) *WatcherMetrics { } // NewWatcher creates a new WAL watcher for a given WriteTo. -func NewWatcher(metrics *WatcherMetrics, readerMetrics *LiveReaderMetrics, logger log.Logger, name string, writer WriteTo, dir string, sendExemplars, sendHistograms, sendMetadata bool) *Watcher { +func NewWatcher(metrics *WatcherMetrics, readerMetrics *LiveReaderMetrics, logger *slog.Logger, name string, writer WriteTo, dir string, sendExemplars, sendHistograms, sendMetadata bool) *Watcher { if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } return &Watcher{ logger: logger, @@ -222,7 +222,7 @@ func (w *Watcher) setMetrics() { // Start the Watcher. 
func (w *Watcher) Start() { w.setMetrics() - level.Info(w.logger).Log("msg", "Starting WAL watcher", "queue", w.name) + w.logger.Info("Starting WAL watcher", "queue", w.name) go w.loop() } @@ -241,7 +241,7 @@ func (w *Watcher) Stop() { w.metrics.currentSegment.DeleteLabelValues(w.name) } - level.Info(w.logger).Log("msg", "WAL watcher stopped", "queue", w.name) + w.logger.Info("WAL watcher stopped", "queue", w.name) } func (w *Watcher) loop() { @@ -251,7 +251,7 @@ func (w *Watcher) loop() { for !isClosed(w.quit) { w.SetStartTime(time.Now()) if err := w.Run(); err != nil { - level.Error(w.logger).Log("msg", "error tailing WAL", "err", err) + w.logger.Error("error tailing WAL", "err", err) } select { @@ -274,7 +274,7 @@ func (w *Watcher) Run() error { // Run will be called again if there was a failure to read the WAL. w.sendSamples = false - level.Info(w.logger).Log("msg", "Replaying WAL", "queue", w.name) + w.logger.Info("Replaying WAL", "queue", w.name) // Backfill from the checkpoint first if it exists. lastCheckpoint, checkpointIndex, err := LastCheckpoint(w.walDir) @@ -294,13 +294,13 @@ func (w *Watcher) Run() error { return err } - level.Debug(w.logger).Log("msg", "Tailing WAL", "lastCheckpoint", lastCheckpoint, "checkpointIndex", checkpointIndex, "currentSegment", currentSegment, "lastSegment", lastSegment) + w.logger.Debug("Tailing WAL", "lastCheckpoint", lastCheckpoint, "checkpointIndex", checkpointIndex, "currentSegment", currentSegment, "lastSegment", lastSegment) for !isClosed(w.quit) { w.currentSegmentMetric.Set(float64(currentSegment)) // On start, after reading the existing WAL for series records, we have a pointer to what is the latest segment. // On subsequent calls to this function, currentSegment will have been incremented and we should open that segment. - level.Debug(w.logger).Log("msg", "Processing segment", "currentSegment", currentSegment) + w.logger.Debug("Processing segment", "currentSegment", currentSegment) if err := w.watch(currentSegment, currentSegment >= lastSegment); err != nil && !errors.Is(err, ErrIgnorable) { return err } @@ -338,9 +338,9 @@ func (w *Watcher) readAndHandleError(r *LiveReader, segmentNum int, tail bool, s // Ignore all errors reading to end of segment whilst replaying the WAL. 
if !tail { if err != nil && !errors.Is(err, io.EOF) { - level.Warn(w.logger).Log("msg", "Ignoring error reading to end of segment, may have dropped data", "segment", segmentNum, "err", err) + w.logger.Warn("Ignoring error reading to end of segment, may have dropped data", "segment", segmentNum, "err", err) } else if r.Offset() != size { - level.Warn(w.logger).Log("msg", "Expected to have read whole segment, may have dropped data", "segment", segmentNum, "read", r.Offset(), "size", size) + w.logger.Warn("Expected to have read whole segment, may have dropped data", "segment", segmentNum, "read", r.Offset(), "size", size) } return ErrIgnorable } @@ -403,7 +403,7 @@ func (w *Watcher) watch(segmentNum int, tail bool) error { <-gcSem }() if err := w.garbageCollectSeries(segmentNum); err != nil { - level.Warn(w.logger).Log("msg", "Error process checkpoint", "err", err) + w.logger.Warn("Error process checkpoint", "err", err) } }() default: @@ -424,7 +424,7 @@ func (w *Watcher) watch(segmentNum int, tail bool) error { // we haven't read due to a notification in quite some time, try reading anyways case <-readTicker.C: - level.Debug(w.logger).Log("msg", "Watcher is reading the WAL due to timeout, haven't received any write notifications recently", "timeout", readTimeout) + w.logger.Debug("Watcher is reading the WAL due to timeout, haven't received any write notifications recently", "timeout", readTimeout) err := w.readAndHandleError(reader, segmentNum, tail, size) if err != nil { return err @@ -460,11 +460,11 @@ func (w *Watcher) garbageCollectSeries(segmentNum int) error { } if index >= segmentNum { - level.Debug(w.logger).Log("msg", "Current segment is behind the checkpoint, skipping reading of checkpoint", "current", fmt.Sprintf("%08d", segmentNum), "checkpoint", dir) + w.logger.Debug("Current segment is behind the checkpoint, skipping reading of checkpoint", "current", fmt.Sprintf("%08d", segmentNum), "checkpoint", dir) return nil } - level.Debug(w.logger).Log("msg", "New checkpoint detected", "new", dir, "currentSegment", segmentNum) + w.logger.Debug("New checkpoint detected", "new", dir, "currentSegment", segmentNum) if err = w.readCheckpoint(dir, (*Watcher).readSegmentForGC); err != nil { return fmt.Errorf("readCheckpoint: %w", err) @@ -519,7 +519,7 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error { if !w.sendSamples { w.sendSamples = true duration := time.Since(w.startTime) - level.Info(w.logger).Log("msg", "Done replaying WAL", "duration", duration) + w.logger.Info("Done replaying WAL", "duration", duration) } samplesToSend = append(samplesToSend, s) } @@ -564,7 +564,7 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error { if !w.sendSamples { w.sendSamples = true duration := time.Since(w.startTime) - level.Info(w.logger).Log("msg", "Done replaying WAL", "duration", duration) + w.logger.Info("Done replaying WAL", "duration", duration) } histogramsToSend = append(histogramsToSend, h) } @@ -592,7 +592,7 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error { if !w.sendSamples { w.sendSamples = true duration := time.Since(w.startTime) - level.Info(w.logger).Log("msg", "Done replaying WAL", "duration", duration) + w.logger.Info("Done replaying WAL", "duration", duration) } floatHistogramsToSend = append(floatHistogramsToSend, fh) } @@ -670,7 +670,7 @@ type segmentReadFn func(w *Watcher, r *LiveReader, segmentNum int, tail bool) er // Read all the series records from a Checkpoint directory. 
func (w *Watcher) readCheckpoint(checkpointDir string, readFn segmentReadFn) error { - level.Debug(w.logger).Log("msg", "Reading checkpoint", "dir", checkpointDir) + w.logger.Debug("Reading checkpoint", "dir", checkpointDir) index, err := checkpointNum(checkpointDir) if err != nil { return fmt.Errorf("checkpointNum: %w", err) @@ -704,7 +704,7 @@ func (w *Watcher) readCheckpoint(checkpointDir string, readFn segmentReadFn) err } } - level.Debug(w.logger).Log("msg", "Read series references from checkpoint", "checkpoint", checkpointDir) + w.logger.Debug("Read series references from checkpoint", "checkpoint", checkpointDir) return nil } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wlog/wlog.go b/vendor/github.com/prometheus/prometheus/tsdb/wlog/wlog.go index b14521f358f..54c257d61a4 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/wlog/wlog.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/wlog/wlog.go @@ -21,6 +21,7 @@ import ( "fmt" "hash/crc32" "io" + "log/slog" "os" "path/filepath" "slices" @@ -28,11 +29,10 @@ import ( "sync" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/golang/snappy" "github.com/klauspost/compress/zstd" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/tsdb/fileutil" ) @@ -121,7 +121,7 @@ func (e *CorruptionErr) Unwrap() error { } // OpenWriteSegment opens segment k in dir. The returned segment is ready for new appends. -func OpenWriteSegment(logger log.Logger, dir string, k int) (*Segment, error) { +func OpenWriteSegment(logger *slog.Logger, dir string, k int) (*Segment, error) { segName := SegmentName(dir, k) f, err := os.OpenFile(segName, os.O_WRONLY|os.O_APPEND, 0o666) if err != nil { @@ -138,7 +138,7 @@ func OpenWriteSegment(logger log.Logger, dir string, k int) (*Segment, error) { // If it was torn mid-record, a full read (which the caller should do anyway // to ensure integrity) will detect it as a corruption by the end. if d := stat.Size() % pageSize; d != 0 { - level.Warn(logger).Log("msg", "Last page of the wlog is torn, filling it with zeros", "segment", segName) + logger.Warn("Last page of the wlog is torn, filling it with zeros", "segment", segName) if _, err := f.Write(make([]byte, pageSize-d)); err != nil { f.Close() return nil, fmt.Errorf("zero-pad torn page: %w", err) @@ -201,7 +201,7 @@ func ParseCompressionType(compress bool, compressType string) CompressionType { // beyond the most recent segment. type WL struct { dir string - logger log.Logger + logger *slog.Logger segmentSize int mtx sync.RWMutex segment *Segment // Active segment. @@ -286,7 +286,7 @@ func newWLMetrics(w *WL, r prometheus.Registerer) *wlMetrics { }, func() float64 { val, err := w.Size() if err != nil { - level.Error(w.logger).Log("msg", "Failed to calculate size of \"wal\" dir", + w.logger.Error("Failed to calculate size of \"wal\" dir", "err", err.Error()) } return float64(val) @@ -309,13 +309,13 @@ func newWLMetrics(w *WL, r prometheus.Registerer) *wlMetrics { } // New returns a new WAL over the given directory. -func New(logger log.Logger, reg prometheus.Registerer, dir string, compress CompressionType) (*WL, error) { +func New(logger *slog.Logger, reg prometheus.Registerer, dir string, compress CompressionType) (*WL, error) { return NewSize(logger, reg, dir, DefaultSegmentSize, compress) } // NewSize returns a new write log over the given directory. // New segments are created with the specified size. 
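Since the wlog constructors now take a *slog.Logger (New, NewSize just below, Open, OpenWriteSegment, NewLiveReader), downstream callers have to hand in a slog-based logger or nil. A hedged sketch of opening a write log against the changed NewSize signature; the directory and error handling are illustrative values only:

```go
package main

import (
	"log"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/common/promslog"
	"github.com/prometheus/prometheus/tsdb/wlog"
)

func main() {
	logger := promslog.NewNopLogger() // nil also works: NewSize falls back to promslog.NewNopLogger()
	reg := prometheus.NewRegistry()

	// The segment size must be a multiple of the WAL page size; the package
	// default is wlog.DefaultSegmentSize (128 MiB).
	w, err := wlog.NewSize(logger, reg, "data/wal", wlog.DefaultSegmentSize, wlog.CompressionSnappy)
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	if err := w.Log([]byte("example record")); err != nil {
		log.Fatal(err)
	}
}
```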
-func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSize int, compress CompressionType) (*WL, error) { +func NewSize(logger *slog.Logger, reg prometheus.Registerer, dir string, segmentSize int, compress CompressionType) (*WL, error) { if segmentSize%pageSize != 0 { return nil, errors.New("invalid segment size") } @@ -323,7 +323,7 @@ func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSi return nil, fmt.Errorf("create dir: %w", err) } if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } var zstdWriter *zstd.Encoder @@ -378,9 +378,9 @@ func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSi } // Open an existing WAL. -func Open(logger log.Logger, dir string) (*WL, error) { +func Open(logger *slog.Logger, dir string) (*WL, error) { if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } zstdWriter, err := zstd.NewWriter(nil) if err != nil { @@ -443,7 +443,7 @@ func (w *WL) Repair(origErr error) error { if cerr.Segment < 0 { return errors.New("corruption error does not specify position") } - level.Warn(w.logger).Log("msg", "Starting corruption repair", + w.logger.Warn("Starting corruption repair", "segment", cerr.Segment, "offset", cerr.Offset) // All segments behind the corruption can no longer be used. @@ -451,7 +451,7 @@ func (w *WL) Repair(origErr error) error { if err != nil { return fmt.Errorf("list segments: %w", err) } - level.Warn(w.logger).Log("msg", "Deleting all segments newer than corrupted segment", "segment", cerr.Segment) + w.logger.Warn("Deleting all segments newer than corrupted segment", "segment", cerr.Segment) for _, s := range segs { if w.segment.i == s.index { @@ -473,7 +473,7 @@ func (w *WL) Repair(origErr error) error { // Regardless of the corruption offset, no record reaches into the previous segment. // So we can safely repair the WAL by removing the segment and re-inserting all // its records up to the corruption. - level.Warn(w.logger).Log("msg", "Rewrite corrupted segment", "segment", cerr.Segment) + w.logger.Warn("Rewrite corrupted segment", "segment", cerr.Segment) fn := SegmentName(w.Dir(), cerr.Segment) tmpfn := fn + ".repair" @@ -583,10 +583,10 @@ func (w *WL) nextSegment(async bool) (int, error) { // Don't block further writes by fsyncing the last segment. 
f := func() { if err := w.fsync(prev); err != nil { - level.Error(w.logger).Log("msg", "sync previous segment", "err", err) + w.logger.Error("sync previous segment", "err", err) } if err := prev.Close(); err != nil { - level.Error(w.logger).Log("msg", "close previous segment", "err", err) + w.logger.Error("close previous segment", "err", err) } } if async { @@ -890,10 +890,10 @@ func (w *WL) Close() (err error) { <-donec if err = w.fsync(w.segment); err != nil { - level.Error(w.logger).Log("msg", "sync previous segment", "err", err) + w.logger.Error("sync previous segment", "err", err) } if err := w.segment.Close(); err != nil { - level.Error(w.logger).Log("msg", "close previous segment", "err", err) + w.logger.Error("close previous segment", "err", err) } w.metrics.Unregister() diff --git a/vendor/github.com/prometheus/prometheus/util/annotations/annotations.go b/vendor/github.com/prometheus/prometheus/util/annotations/annotations.go index b0272b7fee0..ebe74ecd116 100644 --- a/vendor/github.com/prometheus/prometheus/util/annotations/annotations.go +++ b/vendor/github.com/prometheus/prometheus/util/annotations/annotations.go @@ -146,6 +146,7 @@ var ( PossibleNonCounterInfo = fmt.Errorf("%w: metric might not be a counter, name does not end in _total/_sum/_count/_bucket:", PromQLInfo) HistogramQuantileForcedMonotonicityInfo = fmt.Errorf("%w: input to histogram_quantile needed to be fixed for monotonicity (see https://prometheus.io/docs/prometheus/latest/querying/functions/#histogram_quantile) for metric name", PromQLInfo) + IncompatibleTypesInBinOpInfo = fmt.Errorf("%w: incompatible sample types encountered for binary operator", PromQLInfo) ) type annoErr struct { @@ -273,3 +274,12 @@ func NewHistogramQuantileForcedMonotonicityInfo(metricName string, pos posrange. Err: fmt.Errorf("%w %q", HistogramQuantileForcedMonotonicityInfo, metricName), } } + +// NewIncompatibleTypesInBinOpInfo is used if binary operators act on a +// combination of types that doesn't work and therefore returns no result. +func NewIncompatibleTypesInBinOpInfo(lhsType, operator, rhsType string, pos posrange.PositionRange) error { + return annoErr{ + PositionRange: pos, + Err: fmt.Errorf("%w %q: %s %s %s", IncompatibleTypesInBinOpInfo, operator, lhsType, operator, rhsType), + } +} diff --git a/vendor/github.com/prometheus/prometheus/util/convertnhcb/convertnhcb.go b/vendor/github.com/prometheus/prometheus/util/convertnhcb/convertnhcb.go new file mode 100644 index 00000000000..5e08422aa06 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/util/convertnhcb/convertnhcb.go @@ -0,0 +1,173 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package convertnhcb + +import ( + "fmt" + "math" + "sort" + "strings" + + "github.com/prometheus/prometheus/model/histogram" + "github.com/prometheus/prometheus/model/labels" +) + +// TempHistogram is used to collect information about classic histogram +// samples incrementally before creating a histogram.Histogram or +// histogram.FloatHistogram based on the values collected. +type TempHistogram struct { + BucketCounts map[float64]float64 + Count float64 + Sum float64 + HasFloat bool +} + +// NewTempHistogram creates a new TempHistogram to +// collect information about classic histogram samples. +func NewTempHistogram() TempHistogram { + return TempHistogram{ + BucketCounts: map[float64]float64{}, + } +} + +func (h TempHistogram) getIntBucketCounts() (map[float64]int64, error) { + bucketCounts := map[float64]int64{} + for le, count := range h.BucketCounts { + intCount := int64(math.Round(count)) + if float64(intCount) != count { + return nil, fmt.Errorf("bucket count %f for le %g is not an integer", count, le) + } + bucketCounts[le] = intCount + } + return bucketCounts, nil +} + +// ProcessUpperBoundsAndCreateBaseHistogram prepares an integer native +// histogram with custom buckets based on the provided upper bounds. +// Everything is set except the bucket counts. +// The sorted upper bounds are also returned. +func ProcessUpperBoundsAndCreateBaseHistogram(upperBounds0 []float64, needsDedup bool) ([]float64, *histogram.Histogram) { + sort.Float64s(upperBounds0) + var upperBounds []float64 + if needsDedup { + upperBounds = make([]float64, 0, len(upperBounds0)) + prevLE := math.Inf(-1) + for _, le := range upperBounds0 { + if le != prevLE { + upperBounds = append(upperBounds, le) + prevLE = le + } + } + } else { + upperBounds = upperBounds0 + } + var customBounds []float64 + if upperBounds[len(upperBounds)-1] == math.Inf(1) { + customBounds = upperBounds[:len(upperBounds)-1] + } else { + customBounds = upperBounds + } + return upperBounds, &histogram.Histogram{ + Count: 0, + Sum: 0, + Schema: histogram.CustomBucketsSchema, + PositiveSpans: []histogram.Span{ + {Offset: 0, Length: uint32(len(upperBounds))}, + }, + PositiveBuckets: make([]int64, len(upperBounds)), + CustomValues: customBounds, + } +} + +// NewHistogram fills the bucket counts in the provided histogram.Histogram +// or histogram.FloatHistogram based on the provided temporary histogram and +// upper bounds. 
+func NewHistogram(histogram TempHistogram, upperBounds []float64, hBase *histogram.Histogram, fhBase *histogram.FloatHistogram) (*histogram.Histogram, *histogram.FloatHistogram) { + intBucketCounts, err := histogram.getIntBucketCounts() + if err != nil { + return nil, newFloatHistogram(histogram, upperBounds, histogram.BucketCounts, fhBase) + } + return newIntegerHistogram(histogram, upperBounds, intBucketCounts, hBase), nil +} + +func newIntegerHistogram(histogram TempHistogram, upperBounds []float64, bucketCounts map[float64]int64, hBase *histogram.Histogram) *histogram.Histogram { + h := hBase.Copy() + absBucketCounts := make([]int64, len(h.PositiveBuckets)) + var prevCount, total int64 + for i, le := range upperBounds { + currCount, exists := bucketCounts[le] + if !exists { + currCount = 0 + } + count := currCount - prevCount + absBucketCounts[i] = count + total += count + prevCount = currCount + } + h.PositiveBuckets[0] = absBucketCounts[0] + for i := 1; i < len(h.PositiveBuckets); i++ { + h.PositiveBuckets[i] = absBucketCounts[i] - absBucketCounts[i-1] + } + h.Sum = histogram.Sum + if histogram.Count != 0 { + total = int64(histogram.Count) + } + h.Count = uint64(total) + return h.Compact(0) +} + +func newFloatHistogram(histogram TempHistogram, upperBounds []float64, bucketCounts map[float64]float64, fhBase *histogram.FloatHistogram) *histogram.FloatHistogram { + fh := fhBase.Copy() + var prevCount, total float64 + for i, le := range upperBounds { + currCount, exists := bucketCounts[le] + if !exists { + currCount = 0 + } + count := currCount - prevCount + fh.PositiveBuckets[i] = count + total += count + prevCount = currCount + } + fh.Sum = histogram.Sum + if histogram.Count != 0 { + total = histogram.Count + } + fh.Count = total + return fh.Compact(0) +} + +func GetHistogramMetricBase(m labels.Labels, suffix string) labels.Labels { + mName := m.Get(labels.MetricName) + return labels.NewBuilder(m). + Set(labels.MetricName, strings.TrimSuffix(mName, suffix)). + Del(labels.BucketLabel). + Labels() +} + +// GetHistogramMetricBaseName removes the suffixes _bucket, _sum, _count from +// the metric name. We specifically do not remove the _created suffix as that +// should be removed by the caller. +func GetHistogramMetricBaseName(s string) string { + if r, ok := strings.CutSuffix(s, "_bucket"); ok { + return r + } + if r, ok := strings.CutSuffix(s, "_sum"); ok { + return r + } + if r, ok := strings.CutSuffix(s, "_count"); ok { + return r + } + return s +} diff --git a/vendor/github.com/prometheus/prometheus/util/logging/dedupe.go b/vendor/github.com/prometheus/prometheus/util/logging/dedupe.go index d490a6afdf1..d5aee5c095c 100644 --- a/vendor/github.com/prometheus/prometheus/util/logging/dedupe.go +++ b/vendor/github.com/prometheus/prometheus/util/logging/dedupe.go @@ -14,12 +14,10 @@ package logging import ( - "bytes" + "context" + "log/slog" "sync" "time" - - "github.com/go-kit/log" - "github.com/go-logfmt/logfmt" ) const ( @@ -28,22 +26,9 @@ const ( maxEntries = 1024 ) -type logfmtEncoder struct { - *logfmt.Encoder - buf bytes.Buffer -} - -var logfmtEncoderPool = sync.Pool{ - New: func() interface{} { - var enc logfmtEncoder - enc.Encoder = logfmt.NewEncoder(&enc.buf) - return &enc - }, -} - -// Deduper implement log.Logger, dedupes log lines. +// Deduper implements *slog.Handler, dedupes log lines based on a time duration. 
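A minimal sketch of how the convertnhcb helpers above might be combined; the bucket values are invented, and deriving the float base via hBase.ToFloat(nil) is an assumption about caller behavior rather than something shown in this diff:

package main

import (
	"fmt"
	"math"

	"github.com/prometheus/prometheus/util/convertnhcb"
)

func main() {
	// Collect classic histogram data (cumulative bucket counts keyed by "le").
	th := convertnhcb.NewTempHistogram()
	th.BucketCounts[0.1] = 2
	th.BucketCounts[1] = 5
	th.BucketCounts[math.Inf(1)] = 7
	th.Count = 7
	th.Sum = 3.2

	// Build the base native histogram with custom buckets from the upper bounds.
	bounds := make([]float64, 0, len(th.BucketCounts))
	for le := range th.BucketCounts {
		bounds = append(bounds, le)
	}
	sorted, hBase := convertnhcb.ProcessUpperBoundsAndCreateBaseHistogram(bounds, true)

	// Fill in the counts; the float histogram is returned instead when the
	// collected counts are not integral.
	h, fh := convertnhcb.NewHistogram(th, sorted, hBase, hBase.ToFloat(nil))
	fmt.Println(h, fh)
}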
type Deduper struct { - next log.Logger + next *slog.Logger repeat time.Duration quit chan struct{} mtx sync.RWMutex @@ -51,7 +36,7 @@ type Deduper struct { } // Dedupe log lines to next, only repeating every repeat duration. -func Dedupe(next log.Logger, repeat time.Duration) *Deduper { +func Dedupe(next *slog.Logger, repeat time.Duration) *Deduper { d := &Deduper{ next: next, repeat: repeat, @@ -62,6 +47,63 @@ func Dedupe(next log.Logger, repeat time.Duration) *Deduper { return d } +// Enabled returns true if the Deduper's internal slog.Logger is enabled at the +// provided context and log level, and returns false otherwise. It implements +// slog.Handler. +func (d *Deduper) Enabled(ctx context.Context, level slog.Level) bool { + return d.next.Enabled(ctx, level) +} + +// Handle uses the provided context and slog.Record to deduplicate messages +// every 1m. Log records received within the interval are not acted on, and +// thus dropped. Log records that pass deduplication and need action invoke the +// Handle() method on the Deduper's internal slog.Logger's handler, effectively +// chaining log calls to the internal slog.Logger. +func (d *Deduper) Handle(ctx context.Context, r slog.Record) error { + line := r.Message + d.mtx.RLock() + last, ok := d.seen[line] + d.mtx.RUnlock() + + if ok && time.Since(last) < d.repeat { + return nil + } + + d.mtx.Lock() + if len(d.seen) < maxEntries { + d.seen[line] = time.Now() + } + d.mtx.Unlock() + + return d.next.Handler().Handle(ctx, r.Clone()) +} + +// WithAttrs adds the provided attributes to the Deduper's internal +// slog.Logger. It implements slog.Handler. +func (d *Deduper) WithAttrs(attrs []slog.Attr) slog.Handler { + return &Deduper{ + next: slog.New(d.next.Handler().WithAttrs(attrs)), + repeat: d.repeat, + quit: d.quit, + seen: d.seen, + } +} + +// WithGroup adds the provided group name to the Deduper's internal +// slog.Logger. It implements slog.Handler. +func (d *Deduper) WithGroup(name string) slog.Handler { + if name == "" { + return d + } + + return &Deduper{ + next: slog.New(d.next.Handler().WithGroup(name)), + repeat: d.repeat, + quit: d.quit, + seen: d.seen, + } +} + // Stop the Deduper. func (d *Deduper) Stop() { close(d.quit) @@ -87,44 +129,3 @@ func (d *Deduper) run() { } } } - -// Log implements log.Logger. -func (d *Deduper) Log(keyvals ...interface{}) error { - line, err := encode(keyvals...) - if err != nil { - return err - } - - d.mtx.RLock() - last, ok := d.seen[line] - d.mtx.RUnlock() - - if ok && time.Since(last) < d.repeat { - return nil - } - - d.mtx.Lock() - if len(d.seen) < maxEntries { - d.seen[line] = time.Now() - } - d.mtx.Unlock() - - return d.next.Log(keyvals...) 
-} - -func encode(keyvals ...interface{}) (string, error) { - enc := logfmtEncoderPool.Get().(*logfmtEncoder) - enc.buf.Reset() - defer logfmtEncoderPool.Put(enc) - - if err := enc.EncodeKeyvals(keyvals...); err != nil { - return "", err - } - - // Add newline to the end of the buffer - if err := enc.EndRecord(); err != nil { - return "", err - } - - return enc.buf.String(), nil -} diff --git a/vendor/github.com/prometheus/prometheus/util/logging/file.go b/vendor/github.com/prometheus/prometheus/util/logging/file.go index 2afa828547f..f20927bedaf 100644 --- a/vendor/github.com/prometheus/prometheus/util/logging/file.go +++ b/vendor/github.com/prometheus/prometheus/util/logging/file.go @@ -15,20 +15,15 @@ package logging import ( "fmt" + "log/slog" "os" - "time" - "github.com/go-kit/log" + "github.com/prometheus/common/promslog" ) -var timestampFormat = log.TimestampFormat( - func() time.Time { return time.Now().UTC() }, - "2006-01-02T15:04:05.000Z07:00", -) - -// JSONFileLogger represents a logger that writes JSON to a file. +// JSONFileLogger represents a logger that writes JSON to a file. It implements the promql.QueryLogger interface. type JSONFileLogger struct { - logger log.Logger + logger *slog.Logger file *os.File } @@ -40,21 +35,48 @@ func NewJSONFileLogger(s string) (*JSONFileLogger, error) { f, err := os.OpenFile(s, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o666) if err != nil { - return nil, fmt.Errorf("can't create json logger: %w", err) + return nil, fmt.Errorf("can't create json log file: %w", err) } + jsonFmt := &promslog.AllowedFormat{} + _ = jsonFmt.Set("json") return &JSONFileLogger{ - logger: log.With(log.NewJSONLogger(f), "ts", timestampFormat), + logger: promslog.New(&promslog.Config{Format: jsonFmt, Writer: f}), file: f, }, nil } -// Close closes the underlying file. +// Close closes the underlying file. It implements the promql.QueryLogger interface. func (l *JSONFileLogger) Close() error { return l.file.Close() } -// Log calls the Log function of the underlying logger. -func (l *JSONFileLogger) Log(i ...interface{}) error { - return l.logger.Log(i...) +// With calls the `With()` method on the underlying `log/slog.Logger` with the +// provided msg and args. It implements the promql.QueryLogger interface. +func (l *JSONFileLogger) With(args ...any) { + l.logger = l.logger.With(args...) +} + +// Info calls the `Info()` method on the underlying `log/slog.Logger` with the +// provided msg and args. It implements the promql.QueryLogger interface. +func (l *JSONFileLogger) Info(msg string, args ...any) { + l.logger.Info(msg, args...) +} + +// Error calls the `Error()` method on the underlying `log/slog.Logger` with the +// provided msg and args. It implements the promql.QueryLogger interface. +func (l *JSONFileLogger) Error(msg string, args ...any) { + l.logger.Error(msg, args...) +} + +// Debug calls the `Debug()` method on the underlying `log/slog.Logger` with the +// provided msg and args. It implements the promql.QueryLogger interface. +func (l *JSONFileLogger) Debug(msg string, args ...any) { + l.logger.Debug(msg, args...) +} + +// Warn calls the `Warn()` method on the underlying `log/slog.Logger` with the +// provided msg and args. It implements the promql.QueryLogger interface. +func (l *JSONFileLogger) Warn(msg string, args ...any) { + l.logger.Warn(msg, args...) 
} diff --git a/vendor/github.com/prometheus/prometheus/util/logging/ratelimit.go b/vendor/github.com/prometheus/prometheus/util/logging/ratelimit.go deleted file mode 100644 index 32d1e249e68..00000000000 --- a/vendor/github.com/prometheus/prometheus/util/logging/ratelimit.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package logging - -import ( - "github.com/go-kit/log" - "golang.org/x/time/rate" -) - -type ratelimiter struct { - limiter *rate.Limiter - next log.Logger -} - -// RateLimit write to a logger. -func RateLimit(next log.Logger, limit rate.Limit) log.Logger { - return &ratelimiter{ - limiter: rate.NewLimiter(limit, int(limit)), - next: next, - } -} - -func (r *ratelimiter) Log(keyvals ...interface{}) error { - if r.limiter.Allow() { - return r.next.Log(keyvals...) - } - return nil -} diff --git a/vendor/github.com/prometheus/prometheus/util/notifications/notifications.go b/vendor/github.com/prometheus/prometheus/util/notifications/notifications.go new file mode 100644 index 00000000000..4888a0b6641 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/util/notifications/notifications.go @@ -0,0 +1,185 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package notifications + +import ( + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +const ( + ConfigurationUnsuccessful = "Configuration reload has failed." + StartingUp = "Prometheus is starting and replaying the write-ahead log (WAL)." + ShuttingDown = "Prometheus is shutting down and gracefully stopping all operations." +) + +// Notification represents an individual notification message. +type Notification struct { + Text string `json:"text"` + Date time.Time `json:"date"` + Active bool `json:"active"` +} + +// Notifications stores a list of Notification objects. +// It also manages live subscribers that receive notifications via channels. +type Notifications struct { + mu sync.Mutex + notifications []Notification + subscribers map[chan Notification]struct{} // Active subscribers. + maxSubscribers int + + subscriberGauge prometheus.Gauge + notificationsSent prometheus.Counter + notificationsDropped prometheus.Counter +} + +// NewNotifications creates a new Notifications instance. 
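A short usage sketch for the slog-based Deduper above (not taken from this diff); the repeat interval and log messages are arbitrary:

package main

import (
	"log/slog"
	"time"

	"github.com/prometheus/common/promslog"
	"github.com/prometheus/prometheus/util/logging"
)

func main() {
	base := promslog.New(&promslog.Config{})

	// Deduper now implements slog.Handler, so it is wrapped with slog.New
	// instead of being used directly as a go-kit logger.
	dedup := logging.Dedupe(base, 10*time.Second)
	defer dedup.Stop()

	logger := slog.New(dedup)
	logger.Warn("remote write failed", "err", "connection refused")
	// Repeated within the interval, so dropped by Handle (dedupe keys on the message).
	logger.Warn("remote write failed", "err", "connection refused")
}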
+func NewNotifications(maxSubscribers int, reg prometheus.Registerer) *Notifications { + n := &Notifications{ + subscribers: make(map[chan Notification]struct{}), + maxSubscribers: maxSubscribers, + subscriberGauge: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: "prometheus", + Subsystem: "api", + Name: "notification_active_subscribers", + Help: "The current number of active notification subscribers.", + }), + notificationsSent: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: "prometheus", + Subsystem: "api", + Name: "notification_updates_sent_total", + Help: "Total number of notification updates sent.", + }), + notificationsDropped: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: "prometheus", + Subsystem: "api", + Name: "notification_updates_dropped_total", + Help: "Total number of notification updates dropped.", + }), + } + + if reg != nil { + reg.MustRegister(n.subscriberGauge, n.notificationsSent, n.notificationsDropped) + } + + return n +} + +// AddNotification adds a new notification or updates the timestamp if it already exists. +func (n *Notifications) AddNotification(text string) { + n.mu.Lock() + defer n.mu.Unlock() + + for i, notification := range n.notifications { + if notification.Text == text { + n.notifications[i].Date = time.Now() + + n.notifySubscribers(n.notifications[i]) + return + } + } + + newNotification := Notification{ + Text: text, + Date: time.Now(), + Active: true, + } + n.notifications = append(n.notifications, newNotification) + + n.notifySubscribers(newNotification) +} + +// notifySubscribers sends a notification to all active subscribers. +func (n *Notifications) notifySubscribers(notification Notification) { + for sub := range n.subscribers { + // Non-blocking send to avoid subscriber blocking issues. + n.notificationsSent.Inc() + select { + case sub <- notification: + // Notification sent to the subscriber. + default: + // Drop the notification if the subscriber's channel is full. + n.notificationsDropped.Inc() + } + } +} + +// DeleteNotification removes the first notification that matches the provided text. +// The deleted notification is sent to subscribers with Active: false before being removed. +func (n *Notifications) DeleteNotification(text string) { + n.mu.Lock() + defer n.mu.Unlock() + + // Iterate through the notifications to find the matching text. + for i, notification := range n.notifications { + if notification.Text == text { + // Mark the notification as inactive and notify subscribers. + notification.Active = false + n.notifySubscribers(notification) + + // Remove the notification from the list. + n.notifications = append(n.notifications[:i], n.notifications[i+1:]...) + return + } + } +} + +// Get returns a copy of the list of notifications for safe access outside the struct. +func (n *Notifications) Get() []Notification { + n.mu.Lock() + defer n.mu.Unlock() + + // Return a copy of the notifications slice to avoid modifying the original slice outside. + notificationsCopy := make([]Notification, len(n.notifications)) + copy(notificationsCopy, n.notifications) + return notificationsCopy +} + +// Sub allows a client to subscribe to live notifications. +// It returns a channel where the subscriber will receive notifications and a function to unsubscribe. +// Each subscriber has its own goroutine to handle notifications and prevent blocking. 
+func (n *Notifications) Sub() (<-chan Notification, func(), bool) { + n.mu.Lock() + defer n.mu.Unlock() + + if len(n.subscribers) >= n.maxSubscribers { + return nil, nil, false + } + + ch := make(chan Notification, 10) // Buffered channel to prevent blocking. + + // Add the new subscriber to the list. + n.subscribers[ch] = struct{}{} + n.subscriberGauge.Set(float64(len(n.subscribers))) + + // Send all current notifications to the new subscriber. + for _, notification := range n.notifications { + ch <- notification + } + + // Unsubscribe function to remove the channel from subscribers. + unsubscribe := func() { + n.mu.Lock() + defer n.mu.Unlock() + + // Close the channel and remove it from the subscribers map. + close(ch) + delete(n.subscribers, ch) + n.subscriberGauge.Set(float64(len(n.subscribers))) + } + + return ch, unsubscribe, true +} diff --git a/vendor/github.com/prometheus/prometheus/util/teststorage/storage.go b/vendor/github.com/prometheus/prometheus/util/teststorage/storage.go index 7d1f9dda242..e15d591e0c7 100644 --- a/vendor/github.com/prometheus/prometheus/util/teststorage/storage.go +++ b/vendor/github.com/prometheus/prometheus/util/teststorage/storage.go @@ -30,15 +30,15 @@ import ( // New returns a new TestStorage for testing purposes // that removes all associated files on closing. -func New(t testutil.T) *TestStorage { - stor, err := NewWithError() +func New(t testutil.T, outOfOrderTimeWindow ...int64) *TestStorage { + stor, err := NewWithError(outOfOrderTimeWindow...) require.NoError(t, err) return stor } // NewWithError returns a new TestStorage for user facing tests, which reports // errors directly. -func NewWithError() (*TestStorage, error) { +func NewWithError(outOfOrderTimeWindow ...int64) (*TestStorage, error) { dir, err := os.MkdirTemp("", "test_storage") if err != nil { return nil, fmt.Errorf("opening test directory: %w", err) @@ -51,6 +51,14 @@ func NewWithError() (*TestStorage, error) { opts.MaxBlockDuration = int64(24 * time.Hour / time.Millisecond) opts.RetentionDuration = 0 opts.EnableNativeHistograms = true + + // Set OutOfOrderTimeWindow if provided, otherwise use default (0) + if len(outOfOrderTimeWindow) > 0 { + opts.OutOfOrderTimeWindow = outOfOrderTimeWindow[0] + } else { + opts.OutOfOrderTimeWindow = 0 // Default value is zero + } + db, err := tsdb.Open(dir, nil, nil, opts, tsdb.NewDBStats()) if err != nil { return nil, fmt.Errorf("opening test storage: %w", err) diff --git a/vendor/github.com/prometheus/prometheus/util/testutil/port.go b/vendor/github.com/prometheus/prometheus/util/testutil/port.go index 1e449b123d3..7cf4cf1ccc9 100644 --- a/vendor/github.com/prometheus/prometheus/util/testutil/port.go +++ b/vendor/github.com/prometheus/prometheus/util/testutil/port.go @@ -15,21 +15,56 @@ package testutil import ( "net" + "sync" "testing" ) +var ( + mu sync.Mutex + usedPorts []int +) + // RandomUnprivilegedPort returns valid unprivileged random port number which can be used for testing. 
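A rough sketch of how the notifications package above could be exercised; the subscriber limit and registry are placeholder values:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/prometheus/util/notifications"
)

func main() {
	notifs := notifications.NewNotifications(16, prometheus.NewRegistry())

	// Sub returns a buffered channel plus an unsubscribe func; ok is false once
	// maxSubscribers is reached, in which case callers fall back to polling Get().
	ch, unsubscribe, ok := notifs.Sub()
	if !ok {
		return
	}
	defer unsubscribe()

	notifs.AddNotification(notifications.StartingUp)
	fmt.Println((<-ch).Text) // active notification

	// Deletion re-broadcasts the notification with Active set to false.
	notifs.DeleteNotification(notifications.StartingUp)
	fmt.Println((<-ch).Active)
}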
func RandomUnprivilegedPort(t *testing.T) int { t.Helper() + mu.Lock() + defer mu.Unlock() + + port, err := getPort() + if err != nil { + t.Fatal(err) + } + + for portWasUsed(port) { + port, err = getPort() + if err != nil { + t.Fatal(err) + } + } + + usedPorts = append(usedPorts, port) + + return port +} + +func portWasUsed(port int) bool { + for _, usedPort := range usedPorts { + if port == usedPort { + return true + } + } + return false +} +func getPort() (int, error) { listener, err := net.Listen("tcp", ":0") if err != nil { - t.Fatalf("Listening on random port: %v", err) + return 0, err } if err := listener.Close(); err != nil { - t.Fatalf("Closing listener: %v", err) + return 0, err } - return listener.Addr().(*net.TCPAddr).Port + return listener.Addr().(*net.TCPAddr).Port, nil } diff --git a/vendor/github.com/prometheus/prometheus/web/api/v1/api.go b/vendor/github.com/prometheus/prometheus/web/api/v1/api.go index ff700d835f1..63ffe627604 100644 --- a/vendor/github.com/prometheus/prometheus/web/api/v1/api.go +++ b/vendor/github.com/prometheus/prometheus/web/api/v1/api.go @@ -15,8 +15,12 @@ package v1 import ( "context" + "crypto/sha1" + "encoding/hex" + "encoding/json" "errors" "fmt" + "log/slog" "math" "math/rand" "net" @@ -30,8 +34,6 @@ import ( "strings" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/grafana/regexp" jsoniter "github.com/json-iterator/go" "github.com/munnerz/goautoneg" @@ -53,6 +55,7 @@ import ( "github.com/prometheus/prometheus/tsdb/index" "github.com/prometheus/prometheus/util/annotations" "github.com/prometheus/prometheus/util/httputil" + "github.com/prometheus/prometheus/util/notifications" "github.com/prometheus/prometheus/util/stats" ) @@ -202,16 +205,18 @@ type API struct { ready func(http.HandlerFunc) http.HandlerFunc globalURLOptions GlobalURLOptions - db TSDBAdminStats - dbDir string - enableAdmin bool - logger log.Logger - CORSOrigin *regexp.Regexp - buildInfo *PrometheusVersion - runtimeInfo func() (RuntimeInfo, error) - gatherer prometheus.Gatherer - isAgent bool - statsRenderer StatsRenderer + db TSDBAdminStats + dbDir string + enableAdmin bool + logger *slog.Logger + CORSOrigin *regexp.Regexp + buildInfo *PrometheusVersion + runtimeInfo func() (RuntimeInfo, error) + gatherer prometheus.Gatherer + isAgent bool + statsRenderer StatsRenderer + notificationsGetter func() []notifications.Notification + notificationsSub func() (<-chan notifications.Notification, func(), bool) remoteWriteHandler http.Handler remoteReadHandler http.Handler @@ -236,7 +241,7 @@ func NewAPI( db TSDBAdminStats, dbDir string, enableAdmin bool, - logger log.Logger, + logger *slog.Logger, rr func(context.Context) RulesRetriever, remoteReadSampleLimit int, remoteReadConcurrencyLimit int, @@ -245,6 +250,8 @@ func NewAPI( corsOrigin *regexp.Regexp, runtimeInfo func() (RuntimeInfo, error), buildInfo *PrometheusVersion, + notificationsGetter func() []notifications.Notification, + notificationsSub func() (<-chan notifications.Notification, func(), bool), gatherer prometheus.Gatherer, registerer prometheus.Registerer, statsRenderer StatsRenderer, @@ -263,22 +270,24 @@ func NewAPI( targetRetriever: tr, alertmanagerRetriever: ar, - now: time.Now, - config: configFunc, - flagsMap: flagsMap, - ready: readyFunc, - globalURLOptions: globalURLOptions, - db: db, - dbDir: dbDir, - enableAdmin: enableAdmin, - rulesRetriever: rr, - logger: logger, - CORSOrigin: corsOrigin, - runtimeInfo: runtimeInfo, - buildInfo: buildInfo, - gatherer: gatherer, - isAgent: isAgent, - 
statsRenderer: DefaultStatsRenderer, + now: time.Now, + config: configFunc, + flagsMap: flagsMap, + ready: readyFunc, + globalURLOptions: globalURLOptions, + db: db, + dbDir: dbDir, + enableAdmin: enableAdmin, + rulesRetriever: rr, + logger: logger, + CORSOrigin: corsOrigin, + runtimeInfo: runtimeInfo, + buildInfo: buildInfo, + gatherer: gatherer, + isAgent: isAgent, + statsRenderer: DefaultStatsRenderer, + notificationsGetter: notificationsGetter, + notificationsSub: notificationsSub, remoteReadHandler: remote.NewReadHandler(logger, registerer, q, configFunc, remoteReadSampleLimit, remoteReadConcurrencyLimit, remoteReadMaxBytesInFrame), } @@ -368,6 +377,9 @@ func (api *API) Register(r *route.Router) { r.Get("/format_query", wrapAgent(api.formatQuery)) r.Post("/format_query", wrapAgent(api.formatQuery)) + r.Get("/parse_query", wrapAgent(api.parseQuery)) + r.Post("/parse_query", wrapAgent(api.parseQuery)) + r.Get("/labels", wrapAgent(api.labelNames)) r.Post("/labels", wrapAgent(api.labelNames)) r.Get("/label/:name/values", wrapAgent(api.labelValues)) @@ -389,6 +401,8 @@ func (api *API) Register(r *route.Router) { r.Get("/status/flags", wrap(api.serveFlags)) r.Get("/status/tsdb", wrapAgent(api.serveTSDBStatus)) r.Get("/status/walreplay", api.serveWALReplayStatus) + r.Get("/notifications", api.notifications) + r.Get("/notifications/live", api.notificationsSSE) r.Post("/read", api.ready(api.remoteRead)) r.Post("/write", api.ready(api.remoteWrite)) r.Post("/otlp/v1/metrics", api.ready(api.otlpWrite)) @@ -487,6 +501,15 @@ func (api *API) formatQuery(r *http.Request) (result apiFuncResult) { return apiFuncResult{expr.Pretty(0), nil, nil, nil} } +func (api *API) parseQuery(r *http.Request) apiFuncResult { + expr, err := parser.ParseExpr(r.FormValue("query")) + if err != nil { + return invalidParamError(err, "query") + } + + return apiFuncResult{data: translateAST(expr), err: nil, warnings: nil, finalizer: nil} +} + func extractQueryOpts(r *http.Request) (promql.QueryOpts, error) { var duration time.Duration @@ -814,12 +837,22 @@ func (api *API) labelValues(r *http.Request) (result apiFuncResult) { } var ( - // MinTime is the default timestamp used for the begin of optional time ranges. - // Exposed to let downstream projects to reference it. + // MinTime is the default timestamp used for the start of optional time ranges. + // Exposed to let downstream projects reference it. + // + // Historical note: This should just be time.Unix(math.MinInt64/1000, 0).UTC(), + // but it was set to a higher value in the past due to a misunderstanding. + // The value is still low enough for practical purposes, so we don't want + // to change it now, avoiding confusion for importers of this variable. MinTime = time.Unix(math.MinInt64/1000+62135596801, 0).UTC() // MaxTime is the default timestamp used for the end of optional time ranges. // Exposed to let downstream projects to reference it. + // + // Historical note: This should just be time.Unix(math.MaxInt64/1000, 0).UTC(), + // but it was set to a lower value in the past due to a misunderstanding. + // The value is still high enough for practical purposes, so we don't want + // to change it now, avoiding confusion for importers of this variable. MaxTime = time.Unix(math.MaxInt64/1000-62135596801, 999999999).UTC() minTimeFormatted = MinTime.Format(time.RFC3339Nano) @@ -1342,7 +1375,8 @@ func (api *API) metricMetadata(r *http.Request) apiFuncResult { // RuleDiscovery has info for all rules. 
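A hedged example of calling the new parse_query endpoint registered above, assuming the API is mounted at /api/v1 on localhost:9090 as in a default Prometheus setup:

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	q := url.QueryEscape(`sum(rate(http_requests_total[5m]))`)
	resp, err := http.Get("http://localhost:9090/api/v1/parse_query?query=" + q)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// The data field of the response contains the JSON-friendly AST produced by translateAST.
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body))
}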
type RuleDiscovery struct { - RuleGroups []*RuleGroup `json:"groups"` + RuleGroups []*RuleGroup `json:"groups"` + GroupNextToken string `json:"groupNextToken:omitempty"` } // RuleGroup has info for rules which are part of a group. @@ -1429,8 +1463,23 @@ func (api *API) rules(r *http.Request) apiFuncResult { return invalidParamError(err, "exclude_alerts") } + maxGroups, nextToken, parseErr := parseListRulesPaginationRequest(r) + if parseErr != nil { + return *parseErr + } + rgs := make([]*RuleGroup, 0, len(ruleGroups)) + + foundToken := false + for _, grp := range ruleGroups { + if maxGroups > 0 && nextToken != "" && !foundToken { + if nextToken != getRuleGroupNextToken(grp.File(), grp.Name()) { + continue + } + foundToken = true + } + if len(rgSet) > 0 { if _, ok := rgSet[grp.Name()]; !ok { continue @@ -1475,6 +1524,7 @@ func (api *API) rules(r *http.Request) apiFuncResult { if !excludeAlerts { activeAlerts = rulesAlertsToAPIAlerts(rule.ActiveAlerts()) } + enrichedRule = AlertingRule{ State: rule.State().String(), Name: rule.Name(), @@ -1490,6 +1540,7 @@ func (api *API) rules(r *http.Request) apiFuncResult { LastEvaluation: rule.GetEvaluationTimestamp(), Type: "alerting", } + case *rules.RecordingRule: if !returnRecording { break @@ -1516,9 +1567,20 @@ func (api *API) rules(r *http.Request) apiFuncResult { // If the rule group response has no rules, skip it - this means we filtered all the rules of this group. if len(apiRuleGroup.Rules) > 0 { + if maxGroups > 0 && len(rgs) == int(maxGroups) { + // We've reached the capacity of our page plus one. That means that for sure there will be at least one + // rule group in a subsequent request. Therefore a next token is required. + res.GroupNextToken = getRuleGroupNextToken(grp.File(), grp.Name()) + break + } rgs = append(rgs, apiRuleGroup) } } + + if maxGroups > 0 && nextToken != "" && !foundToken { + return invalidParamError(fmt.Errorf("invalid group_next_token '%v'. 
were rule groups changed?", nextToken), "group_next_token") + } + res.RuleGroups = rgs return apiFuncResult{res, nil, nil, nil} } @@ -1537,6 +1599,44 @@ func parseExcludeAlerts(r *http.Request) (bool, error) { return excludeAlerts, nil } +func parseListRulesPaginationRequest(r *http.Request) (int64, string, *apiFuncResult) { + var ( + parsedMaxGroups int64 = -1 + err error + ) + maxGroups := r.URL.Query().Get("group_limit") + nextToken := r.URL.Query().Get("group_next_token") + + if nextToken != "" && maxGroups == "" { + errResult := invalidParamError(fmt.Errorf("group_limit needs to be present in order to paginate over the groups"), "group_next_token") + return -1, "", &errResult + } + + if maxGroups != "" { + parsedMaxGroups, err = strconv.ParseInt(maxGroups, 10, 32) + if err != nil { + errResult := invalidParamError(fmt.Errorf("group_limit needs to be a valid number: %w", err), "group_limit") + return -1, "", &errResult + } + if parsedMaxGroups <= 0 { + errResult := invalidParamError(fmt.Errorf("group_limit needs to be greater than 0"), "group_limit") + return -1, "", &errResult + } + } + + if parsedMaxGroups > 0 { + return parsedMaxGroups, nextToken, nil + } + + return -1, "", nil +} + +func getRuleGroupNextToken(file, group string) string { + h := sha1.New() + h.Write([]byte(file + ";" + group)) + return hex.EncodeToString(h.Sum(nil)) +} + type prometheusConfig struct { YAML string `json:"yaml"` } @@ -1658,6 +1758,57 @@ func (api *API) serveWALReplayStatus(w http.ResponseWriter, r *http.Request) { }, nil, "") } +func (api *API) notifications(w http.ResponseWriter, r *http.Request) { + httputil.SetCORS(w, api.CORSOrigin, r) + api.respond(w, r, api.notificationsGetter(), nil, "") +} + +func (api *API) notificationsSSE(w http.ResponseWriter, r *http.Request) { + httputil.SetCORS(w, api.CORSOrigin, r) + w.Header().Set("Content-Type", "text/event-stream") + w.Header().Set("Cache-Control", "no-cache") + w.Header().Set("Connection", "keep-alive") + + // Subscribe to notifications. + notifications, unsubscribe, ok := api.notificationsSub() + if !ok { + w.WriteHeader(http.StatusNoContent) + return + } + defer unsubscribe() + + // Set up a flusher to push the response to the client. + flusher, ok := w.(http.Flusher) + if !ok { + http.Error(w, "Streaming unsupported", http.StatusInternalServerError) + return + } + + // Flush the response to ensure the headers are immediately and eventSource + // onopen is triggered client-side. + flusher.Flush() + + for { + select { + case notification := <-notifications: + // Marshal the notification to JSON. + jsonData, err := json.Marshal(notification) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + continue + } + + // Write the event data in SSE format with JSON content. + fmt.Fprintf(w, "data: %s\n\n", jsonData) + + // Flush the response to ensure the data is sent immediately. + flusher.Flush() + case <-r.Context().Done(): + return + } + } +} + func (api *API) remoteRead(w http.ResponseWriter, r *http.Request) { // This is only really for tests - this will never be nil IRL. 
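A sketch of a client consuming the notificationsSSE stream above; the host and port are assumptions, and the "data: " framing follows the handler's Fprintf call:

package main

import (
	"bufio"
	"encoding/json"
	"fmt"
	"net/http"
	"strings"
	"time"
)

type notification struct {
	Text   string    `json:"text"`
	Date   time.Time `json:"date"`
	Active bool      `json:"active"`
}

func main() {
	resp, err := http.Get("http://localhost:9090/api/v1/notifications/live")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// 204 means the subscriber limit was hit; poll /api/v1/notifications instead.
	if resp.StatusCode == http.StatusNoContent {
		return
	}

	// Each event arrives as a "data: <json>" line followed by a blank line.
	scanner := bufio.NewScanner(resp.Body)
	for scanner.Scan() {
		line := scanner.Text()
		if !strings.HasPrefix(line, "data: ") {
			continue
		}
		var n notification
		if err := json.Unmarshal([]byte(strings.TrimPrefix(line, "data: ")), &n); err != nil {
			continue
		}
		fmt.Printf("%s (active=%v)\n", n.Text, n.Active)
	}
}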
if api.remoteReadHandler != nil { @@ -1679,7 +1830,7 @@ func (api *API) otlpWrite(w http.ResponseWriter, r *http.Request) { if api.otlpWriteHandler != nil { api.otlpWriteHandler.ServeHTTP(w, r) } else { - http.Error(w, "otlp write receiver needs to be enabled with --enable-feature=otlp-write-receiver", http.StatusNotFound) + http.Error(w, "otlp write receiver needs to be enabled with --web.enable-otlp-receiver", http.StatusNotFound) } } @@ -1782,7 +1933,7 @@ func (api *API) respond(w http.ResponseWriter, req *http.Request, data interface b, err := codec.Encode(resp) if err != nil { - level.Error(api.logger).Log("msg", "error marshaling response", "url", req.URL, "err", err) + api.logger.Error("error marshaling response", "url", req.URL, "err", err) http.Error(w, err.Error(), http.StatusInternalServerError) return } @@ -1790,7 +1941,7 @@ func (api *API) respond(w http.ResponseWriter, req *http.Request, data interface w.Header().Set("Content-Type", codec.ContentType().String()) w.WriteHeader(http.StatusOK) if n, err := w.Write(b); err != nil { - level.Error(api.logger).Log("msg", "error writing response", "url", req.URL, "bytesWritten", n, "err", err) + api.logger.Error("error writing response", "url", req.URL, "bytesWritten", n, "err", err) } } @@ -1820,7 +1971,7 @@ func (api *API) respondError(w http.ResponseWriter, apiErr *apiError, data inter Data: data, }) if err != nil { - level.Error(api.logger).Log("msg", "error marshaling json response", "err", err) + api.logger.Error("error marshaling json response", "err", err) http.Error(w, err.Error(), http.StatusInternalServerError) return } @@ -1848,7 +1999,7 @@ func (api *API) respondError(w http.ResponseWriter, apiErr *apiError, data inter w.Header().Set("Content-Type", "application/json") w.WriteHeader(code) if n, err := w.Write(b); err != nil { - level.Error(api.logger).Log("msg", "error writing response", "bytesWritten", n, "err", err) + api.logger.Error("error writing response", "bytesWritten", n, "err", err) } } diff --git a/vendor/github.com/prometheus/prometheus/web/api/v1/translate_ast.go b/vendor/github.com/prometheus/prometheus/web/api/v1/translate_ast.go new file mode 100644 index 00000000000..afa11f16b9d --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/web/api/v1/translate_ast.go @@ -0,0 +1,157 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import ( + "strconv" + + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/promql/parser" +) + +// Take a Go PromQL AST and translate it to an object that's nicely JSON-serializable +// for the tree view in the UI. +// TODO: Could it make sense to do this via the normal JSON marshalling methods? Maybe +// too UI-specific though. 
+func translateAST(node parser.Expr) interface{} { + if node == nil { + return nil + } + + switch n := node.(type) { + case *parser.AggregateExpr: + return map[string]interface{}{ + "type": "aggregation", + "op": n.Op.String(), + "expr": translateAST(n.Expr), + "param": translateAST(n.Param), + "grouping": sanitizeList(n.Grouping), + "without": n.Without, + } + case *parser.BinaryExpr: + var matching interface{} + if m := n.VectorMatching; m != nil { + matching = map[string]interface{}{ + "card": m.Card.String(), + "labels": sanitizeList(m.MatchingLabels), + "on": m.On, + "include": sanitizeList(m.Include), + } + } + + return map[string]interface{}{ + "type": "binaryExpr", + "op": n.Op.String(), + "lhs": translateAST(n.LHS), + "rhs": translateAST(n.RHS), + "matching": matching, + "bool": n.ReturnBool, + } + case *parser.Call: + args := []interface{}{} + for _, arg := range n.Args { + args = append(args, translateAST(arg)) + } + + return map[string]interface{}{ + "type": "call", + "func": map[string]interface{}{ + "name": n.Func.Name, + "argTypes": n.Func.ArgTypes, + "variadic": n.Func.Variadic, + "returnType": n.Func.ReturnType, + }, + "args": args, + } + case *parser.MatrixSelector: + vs := n.VectorSelector.(*parser.VectorSelector) + return map[string]interface{}{ + "type": "matrixSelector", + "name": vs.Name, + "range": n.Range.Milliseconds(), + "offset": vs.OriginalOffset.Milliseconds(), + "matchers": translateMatchers(vs.LabelMatchers), + "timestamp": vs.Timestamp, + "startOrEnd": getStartOrEnd(vs.StartOrEnd), + } + case *parser.SubqueryExpr: + return map[string]interface{}{ + "type": "subquery", + "expr": translateAST(n.Expr), + "range": n.Range.Milliseconds(), + "offset": n.OriginalOffset.Milliseconds(), + "step": n.Step.Milliseconds(), + "timestamp": n.Timestamp, + "startOrEnd": getStartOrEnd(n.StartOrEnd), + } + case *parser.NumberLiteral: + return map[string]string{ + "type": "numberLiteral", + "val": strconv.FormatFloat(n.Val, 'f', -1, 64), + } + case *parser.ParenExpr: + return map[string]interface{}{ + "type": "parenExpr", + "expr": translateAST(n.Expr), + } + case *parser.StringLiteral: + return map[string]interface{}{ + "type": "stringLiteral", + "val": n.Val, + } + case *parser.UnaryExpr: + return map[string]interface{}{ + "type": "unaryExpr", + "op": n.Op.String(), + "expr": translateAST(n.Expr), + } + case *parser.VectorSelector: + return map[string]interface{}{ + "type": "vectorSelector", + "name": n.Name, + "offset": n.OriginalOffset.Milliseconds(), + "matchers": translateMatchers(n.LabelMatchers), + "timestamp": n.Timestamp, + "startOrEnd": getStartOrEnd(n.StartOrEnd), + } + } + panic("unsupported node type") +} + +func sanitizeList(l []string) []string { + if l == nil { + return []string{} + } + return l +} + +func translateMatchers(in []*labels.Matcher) interface{} { + out := []map[string]interface{}{} + for _, m := range in { + out = append(out, map[string]interface{}{ + "name": m.Name, + "value": m.Value, + "type": m.Type.String(), + }) + } + return out +} + +func getStartOrEnd(startOrEnd parser.ItemType) interface{} { + if startOrEnd == 0 { + return nil + } + + return startOrEnd.String() +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/map.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/map.go index 5bbfab962b0..91b803922a3 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/map.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/map.go @@ -225,6 +225,15 @@ func (m Map) Range(f func(k string, v Value) bool) { } } +// 
MoveTo moves all key/values from the current map overriding the destination and +// resetting the current instance to its zero value +func (m Map) MoveTo(dest Map) { + m.getState().AssertMutable() + dest.getState().AssertMutable() + *dest.getOrig() = *m.getOrig() + *m.getOrig() = nil +} + // CopyTo copies all elements from the current map overriding the destination. func (m Map) CopyTo(dest Map) { dest.getState().AssertMutable() diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/clienttrace.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/clienttrace.go index 67e03f24810..6ef23721cb7 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/clienttrace.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/clienttrace.go @@ -215,6 +215,10 @@ func (ct *clientTracer) start(hook, spanName string, attrs ...attribute.KeyValue func (ct *clientTracer) end(hook string, err error, attrs ...attribute.KeyValue) { if !ct.useSpans { + // sometimes end may be called without previous start + if ct.root == nil { + ct.root = trace.SpanFromContext(ct.Context) + } if err != nil { attrs = append(attrs, attribute.String(hook+".error", err.Error())) } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/version.go index 16d9fc97bae..a978bb5b288 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/version.go @@ -5,7 +5,7 @@ package otelhttptrace // import "go.opentelemetry.io/contrib/instrumentation/net // Version is the current release version of the httptrace instrumentation. func Version() string { - return "0.55.0" + return "0.56.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go index 5d6e6156b7b..a83a026274a 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go @@ -18,13 +18,6 @@ const ( WriteErrorKey = attribute.Key("http.write_error") // if an error occurred while writing a reply, the string of the error (io.EOF is not recorded) ) -// Client HTTP metrics. -const ( - clientRequestSize = "http.client.request.size" // Outgoing request bytes total - clientResponseSize = "http.client.response.size" // Outgoing response bytes total - clientDuration = "http.client.duration" // Outgoing end to end duration, milliseconds -) - // Filter is a predicate used to determine whether a given http.request should // be traced. A Filter must return true if the request should be traced. 
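A small sketch contrasting the new pcommon.Map.MoveTo above with the existing CopyTo semantics; keys and values are invented:

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pcommon"
)

func main() {
	src := pcommon.NewMap()
	src.PutStr("service.name", "mimir")
	src.PutInt("attempt", 3)

	dest := pcommon.NewMap()
	dest.PutStr("stale", "value")

	// MoveTo replaces dest's contents with src's and resets src to empty,
	// avoiding the per-entry copy that CopyTo performs.
	src.MoveTo(dest)
	fmt.Println(src.Len(), dest.Len()) // 0 2
}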
type Filter func(*http.Request) bool diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go index 33580a35b77..e4236ab398c 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go @@ -81,12 +81,6 @@ func (h *middleware) configure(c *config) { h.semconv = semconv.NewHTTPServer(c.Meter) } -func handleErr(err error) { - if err != nil { - otel.Handle(err) - } -} - // serveHTTP sets up tracing and calls the given next http.Handler with the span // context injected into the request context. func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http.Handler) { @@ -190,14 +184,18 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http // Use floating point division here for higher precision (instead of Millisecond method). elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond) - h.semconv.RecordMetrics(ctx, semconv.MetricData{ - ServerName: h.server, - Req: r, - StatusCode: statusCode, - AdditionalAttributes: labeler.Get(), - RequestSize: bw.BytesRead(), - ResponseSize: bytesWritten, - ElapsedTime: elapsedTime, + h.semconv.RecordMetrics(ctx, semconv.ServerMetricData{ + ServerName: h.server, + ResponseSize: bytesWritten, + MetricAttributes: semconv.MetricAttributes{ + Req: r, + StatusCode: statusCode, + AdditionalAttributes: labeler.Get(), + }, + MetricData: semconv.MetricData{ + RequestSize: bw.BytesRead(), + ElapsedTime: elapsedTime, + }, }) } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go index 9cae4cab86a..fb893b25042 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go @@ -83,18 +83,26 @@ func (s HTTPServer) Status(code int) (codes.Code, string) { return codes.Unset, "" } -type MetricData struct { - ServerName string +type ServerMetricData struct { + ServerName string + ResponseSize int64 + + MetricData + MetricAttributes +} + +type MetricAttributes struct { Req *http.Request StatusCode int AdditionalAttributes []attribute.KeyValue +} - RequestSize int64 - ResponseSize int64 - ElapsedTime float64 +type MetricData struct { + RequestSize int64 + ElapsedTime float64 } -func (s HTTPServer) RecordMetrics(ctx context.Context, md MetricData) { +func (s HTTPServer) RecordMetrics(ctx context.Context, md ServerMetricData) { if s.requestBytesCounter == nil || s.responseBytesCounter == nil || s.serverLatencyMeasure == nil { // This will happen if an HTTPServer{} is used insted of NewHTTPServer. return @@ -102,7 +110,7 @@ func (s HTTPServer) RecordMetrics(ctx context.Context, md MetricData) { attributes := oldHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes) o := metric.WithAttributeSet(attribute.NewSet(attributes...)) - addOpts := []metric.AddOption{o} // Allocate vararg slice once. + addOpts := []metric.AddOption{o} s.requestBytesCounter.Add(ctx, md.RequestSize, addOpts...) s.responseBytesCounter.Add(ctx, md.ResponseSize, addOpts...) 
s.serverLatencyMeasure.Record(ctx, md.ElapsedTime, o) @@ -122,11 +130,20 @@ func NewHTTPServer(meter metric.Meter) HTTPServer { type HTTPClient struct { duplicate bool + + // old metrics + requestBytesCounter metric.Int64Counter + responseBytesCounter metric.Int64Counter + latencyMeasure metric.Float64Histogram } -func NewHTTPClient() HTTPClient { +func NewHTTPClient(meter metric.Meter) HTTPClient { env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN")) - return HTTPClient{duplicate: env == "http/dup"} + client := HTTPClient{ + duplicate: env == "http/dup", + } + client.requestBytesCounter, client.responseBytesCounter, client.latencyMeasure = oldHTTPClient{}.createMeasures(meter) + return client } // RequestTraceAttrs returns attributes for an HTTP request made by a client. @@ -163,3 +180,48 @@ func (c HTTPClient) ErrorType(err error) attribute.KeyValue { return attribute.KeyValue{} } + +type MetricOpts struct { + measurement metric.MeasurementOption + addOptions metric.AddOption +} + +func (o MetricOpts) MeasurementOption() metric.MeasurementOption { + return o.measurement +} + +func (o MetricOpts) AddOptions() metric.AddOption { + return o.addOptions +} + +func (c HTTPClient) MetricOptions(ma MetricAttributes) MetricOpts { + attributes := oldHTTPClient{}.MetricAttributes(ma.Req, ma.StatusCode, ma.AdditionalAttributes) + // TODO: Duplicate Metrics + set := metric.WithAttributeSet(attribute.NewSet(attributes...)) + return MetricOpts{ + measurement: set, + addOptions: set, + } +} + +func (s HTTPClient) RecordMetrics(ctx context.Context, md MetricData, opts MetricOpts) { + if s.requestBytesCounter == nil || s.latencyMeasure == nil { + // This will happen if an HTTPClient{} is used insted of NewHTTPClient(). + return + } + + s.requestBytesCounter.Add(ctx, md.RequestSize, opts.AddOptions()) + s.latencyMeasure.Record(ctx, md.ElapsedTime, opts.MeasurementOption()) + + // TODO: Duplicate Metrics +} + +func (s HTTPClient) RecordResponseSize(ctx context.Context, responseData int64, opts metric.AddOption) { + if s.responseBytesCounter == nil { + // This will happen if an HTTPClient{} is used insted of NewHTTPClient(). 
+ return + } + + s.responseBytesCounter.Add(ctx, responseData, opts) + // TODO: Duplicate Metrics +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go index c999b05e675..5367732ec5d 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go @@ -144,7 +144,7 @@ func (o oldHTTPServer) MetricAttributes(server string, req *http.Request, status attributes := slices.Grow(additionalAttributes, n) attributes = append(attributes, - o.methodMetric(req.Method), + standardizeHTTPMethodMetric(req.Method), o.scheme(req.TLS != nil), semconv.NetHostName(host)) @@ -164,16 +164,6 @@ func (o oldHTTPServer) MetricAttributes(server string, req *http.Request, status return attributes } -func (o oldHTTPServer) methodMetric(method string) attribute.KeyValue { - method = strings.ToUpper(method) - switch method { - case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace: - default: - method = "_OTHER" - } - return semconv.HTTPMethod(method) -} - func (o oldHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive if https { return semconv.HTTPSchemeHTTPS @@ -190,3 +180,95 @@ func (o oldHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue func (o oldHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue { return semconvutil.HTTPClientResponse(resp) } + +func (o oldHTTPClient) MetricAttributes(req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { + /* The following semantic conventions are returned if present: + http.method string + http.status_code int + net.peer.name string + net.peer.port int + */ + + n := 2 // method, peer name. + var h string + if req.URL != nil { + h = req.URL.Host + } + var requestHost string + var requestPort int + for _, hostport := range []string{h, req.Header.Get("Host")} { + requestHost, requestPort = splitHostPort(hostport) + if requestHost != "" || requestPort > 0 { + break + } + } + + port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort) + if port > 0 { + n++ + } + + if statusCode > 0 { + n++ + } + + attributes := slices.Grow(additionalAttributes, n) + attributes = append(attributes, + standardizeHTTPMethodMetric(req.Method), + semconv.NetPeerName(requestHost), + ) + + if port > 0 { + attributes = append(attributes, semconv.NetPeerPort(port)) + } + + if statusCode > 0 { + attributes = append(attributes, semconv.HTTPStatusCode(statusCode)) + } + return attributes +} + +// Client HTTP metrics. 
+const ( + clientRequestSize = "http.client.request.size" // Incoming request bytes total + clientResponseSize = "http.client.response.size" // Incoming response bytes total + clientDuration = "http.client.duration" // Incoming end to end duration, milliseconds +) + +func (o oldHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) { + if meter == nil { + return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{} + } + requestBytesCounter, err := meter.Int64Counter( + clientRequestSize, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP request messages."), + ) + handleErr(err) + + responseBytesCounter, err := meter.Int64Counter( + clientResponseSize, + metric.WithUnit("By"), + metric.WithDescription("Measures the size of HTTP response messages."), + ) + handleErr(err) + + latencyMeasure, err := meter.Float64Histogram( + clientDuration, + metric.WithUnit("ms"), + metric.WithDescription("Measures the duration of outbound HTTP requests."), + ) + handleErr(err) + + return requestBytesCounter, responseBytesCounter, latencyMeasure +} + +func standardizeHTTPMethodMetric(method string) attribute.KeyValue { + method = strings.ToUpper(method) + switch method { + case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace: + default: + method = "_OTHER" + } + return semconv.HTTPMethod(method) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go index b4119d3438b..39681ad4b09 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go @@ -13,11 +13,9 @@ import ( "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" - "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" - "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/trace" @@ -29,7 +27,6 @@ type Transport struct { rt http.RoundTripper tracer trace.Tracer - meter metric.Meter propagators propagation.TextMapPropagator spanStartOptions []trace.SpanStartOption filters []Filter @@ -37,10 +34,7 @@ type Transport struct { clientTrace func(context.Context) *httptrace.ClientTrace metricAttributesFn func(*http.Request) []attribute.KeyValue - semconv semconv.HTTPClient - requestBytesCounter metric.Int64Counter - responseBytesCounter metric.Int64Counter - latencyMeasure metric.Float64Histogram + semconv semconv.HTTPClient } var _ http.RoundTripper = &Transport{} @@ -57,8 +51,7 @@ func NewTransport(base http.RoundTripper, opts ...Option) *Transport { } t := Transport{ - rt: base, - semconv: semconv.NewHTTPClient(), + rt: base, } defaultOpts := []Option{ @@ -68,46 +61,21 @@ func NewTransport(base http.RoundTripper, opts ...Option) *Transport { c := newConfig(append(defaultOpts, opts...)...) 
t.applyConfig(c) - t.createMeasures() return &t } func (t *Transport) applyConfig(c *config) { t.tracer = c.Tracer - t.meter = c.Meter t.propagators = c.Propagators t.spanStartOptions = c.SpanStartOptions t.filters = c.Filters t.spanNameFormatter = c.SpanNameFormatter t.clientTrace = c.ClientTrace + t.semconv = semconv.NewHTTPClient(c.Meter) t.metricAttributesFn = c.MetricAttributesFn } -func (t *Transport) createMeasures() { - var err error - t.requestBytesCounter, err = t.meter.Int64Counter( - clientRequestSize, - metric.WithUnit("By"), - metric.WithDescription("Measures the size of HTTP request messages."), - ) - handleErr(err) - - t.responseBytesCounter, err = t.meter.Int64Counter( - clientResponseSize, - metric.WithUnit("By"), - metric.WithDescription("Measures the size of HTTP response messages."), - ) - handleErr(err) - - t.latencyMeasure, err = t.meter.Float64Histogram( - clientDuration, - metric.WithUnit("ms"), - metric.WithDescription("Measures the duration of outbound HTTP requests."), - ) - handleErr(err) -} - func defaultTransportFormatter(_ string, r *http.Request) string { return "HTTP " + r.Method } @@ -177,16 +145,15 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { } // metrics - metricAttrs := append(append(labeler.Get(), semconvutil.HTTPClientRequestMetrics(r)...), t.metricAttributesFromRequest(r)...) - if res.StatusCode > 0 { - metricAttrs = append(metricAttrs, semconv.HTTPStatusCode(res.StatusCode)) - } - o := metric.WithAttributeSet(attribute.NewSet(metricAttrs...)) + metricOpts := t.semconv.MetricOptions(semconv.MetricAttributes{ + Req: r, + StatusCode: res.StatusCode, + AdditionalAttributes: append(labeler.Get(), t.metricAttributesFromRequest(r)...), + }) - t.requestBytesCounter.Add(ctx, bw.BytesRead(), o) // For handling response bytes we leverage a callback when the client reads the http response readRecordFunc := func(n int64) { - t.responseBytesCounter.Add(ctx, n, o) + t.semconv.RecordResponseSize(ctx, n, metricOpts.AddOptions()) } // traces @@ -198,9 +165,12 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { // Use floating point division here for higher precision (instead of Millisecond method). elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond) - t.latencyMeasure.Record(ctx, elapsedTime, o) + t.semconv.RecordMetrics(ctx, semconv.MetricData{ + RequestSize: bw.BytesRead(), + ElapsedTime: elapsedTime, + }, metricOpts) - return res, err + return res, nil } func (t *Transport) metricAttributesFromRequest(r *http.Request) []attribute.KeyValue { diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go index 1133961d393..a07d8689d47 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go @@ -5,7 +5,7 @@ package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http // Version is the current release version of the otelhttp instrumentation. 
func Version() string { - return "0.55.0" + return "0.56.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml index a5f904197fe..d09555506f7 100644 --- a/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -25,6 +25,7 @@ linters: - revive - staticcheck - tenv + - testifylint - typecheck - unconvert - unused @@ -302,3 +303,9 @@ linters-settings: # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#waitgroup-by-value - name: waitgroup-by-value disabled: false + testifylint: + enable-all: true + disable: + - float-compare + - go-require + - require-error diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index fb107426e76..4b361d0269c 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -11,6 +11,35 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm +## [1.31.0/0.53.0/0.7.0/0.0.10] 2024-10-11 + +### Added + +- Add `go.opentelemetry.io/otel/sdk/metric/exemplar` package which includes `Exemplar`, `Filter`, `TraceBasedFilter`, `AlwaysOnFilter`, `HistogramReservoir`, `FixedSizeReservoir`, `Reservoir`, `Value` and `ValueType` types. These will be used for configuring the exemplar reservoir for the metrics sdk. (#5747, #5862) +- Add `WithExportBufferSize` option to log batch processor.(#5877) + +### Changed + +- Enable exemplars by default in `go.opentelemetry.io/otel/sdk/metric`. Exemplars can be disabled by setting `OTEL_METRICS_EXEMPLAR_FILTER=always_off` (#5778) +- `Logger.Enabled` in `go.opentelemetry.io/otel/log` now accepts a newly introduced `EnabledParameters` type instead of `Record`. (#5791) +- `FilterProcessor.Enabled` in `go.opentelemetry.io/otel/sdk/log/internal/x` now accepts `EnabledParameters` instead of `Record`. (#5791) +- The `Record` type in `go.opentelemetry.io/otel/log` is no longer comparable. (#5847) +- Performance improvements for the trace SDK `SetAttributes` method in `Span`. (#5864) +- Reduce memory allocations for the `Event` and `Link` lists in `Span`. (#5858) +- Performance improvements for the trace SDK `AddEvent`, `AddLink`, `RecordError` and `End` methods in `Span`. (#5874) + +### Deprecated + +- Deprecate all examples under `go.opentelemetry.io/otel/example` as they are moved to [Contrib repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples). (#5854) + +### Fixed + +- The race condition for multiple `FixedSize` exemplar reservoirs identified in #5814 is resolved. (#5819) +- Fix log records duplication in case of heterogeneous resource attributes by correctly mapping each log record to it's resource and scope. (#5803) +- Fix timer channel drain to avoid hanging on Go 1.23. (#5868) +- Fix delegation for global meter providers, and panic when calling otel.SetMeterProvider. (#5827) +- Change the `reflect.TypeOf` to use a nil pointer to not allocate on the heap unless necessary. (#5827) + ## [1.30.0/0.52.0/0.6.0/0.0.9] 2024-09-09 ### Added @@ -3081,7 +3110,8 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. 
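One practical consequence of the 1.31.0 changelog entries above, shown as a minimal hedged sketch: exemplars are now on by default in the metric SDK, and the documented way to turn them off is the `OTEL_METRICS_EXEMPLAR_FILTER` environment variable. Everything else in the snippet is ordinary SDK setup and not part of this diff.

```go
// Minimal sketch, assuming only what the changelog above states: exemplars
// are enabled by default and can be disabled through the environment
// variable before the MeterProvider is created.
package main

import (
	"context"
	"os"

	"go.opentelemetry.io/otel"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	// "always_off" is the value the changelog documents for disabling exemplars.
	os.Setenv("OTEL_METRICS_EXEMPLAR_FILTER", "always_off")

	mp := sdkmetric.NewMeterProvider()
	defer func() { _ = mp.Shutdown(context.Background()) }()
	otel.SetMeterProvider(mp)
}
```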
-[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.30.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.31.0...HEAD +[1.31.0/0.53.0/0.7.0/0.0.10]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.31.0 [1.30.0/0.52.0/0.6.0/0.0.9]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.30.0 [1.29.0/0.51.0/0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.29.0 [1.28.0/0.50.0/0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.28.0 diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS index 5904bb7070e..945a07d2b07 100644 --- a/vendor/go.opentelemetry.io/otel/CODEOWNERS +++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS @@ -12,6 +12,6 @@ # https://help.github.com/en/articles/about-code-owners # -* @MrAlias @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu +* @MrAlias @XSAM @dashpole @pellared @dmathieu -CODEOWNERS @MrAlias @MadVikingGod @pellared @dashpole @XSAM @dmathieu +CODEOWNERS @MrAlias @pellared @dashpole @XSAM @dmathieu diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md index 91580725350..bb339655743 100644 --- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -631,11 +631,8 @@ should be canceled. ### Approvers -- [Chester Cheung](https://github.com/hanyuancheung), Tencent - ### Maintainers -- [Aaron Clawson](https://github.com/MadVikingGod), LightStep - [Damien Mathieu](https://github.com/dmathieu), Elastic - [David Ashpole](https://github.com/dashpole), Google - [Robert Pająk](https://github.com/pellared), Splunk @@ -644,11 +641,13 @@ should be canceled. 
### Emeritus -- [Liz Fong-Jones](https://github.com/lizthegrey), Honeycomb -- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep -- [Josh MacDonald](https://github.com/jmacd), LightStep +- [Aaron Clawson](https://github.com/MadVikingGod), LightStep - [Anthony Mirabella](https://github.com/Aneurysm9), AWS +- [Chester Cheung](https://github.com/hanyuancheung), Tencent - [Evan Torrie](https://github.com/evantorrie), Yahoo +- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep +- [Josh MacDonald](https://github.com/jmacd), LightStep +- [Liz Fong-Jones](https://github.com/lizthegrey), Honeycomb ### Become an Approver or a Maintainer diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile index b04695b242f..a1228a21240 100644 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -54,9 +54,6 @@ $(TOOLS)/stringer: PACKAGE=golang.org/x/tools/cmd/stringer PORTO = $(TOOLS)/porto $(TOOLS)/porto: PACKAGE=github.com/jcchavezs/porto/cmd/porto -GOJQ = $(TOOLS)/gojq -$(TOOLS)/gojq: PACKAGE=github.com/itchyny/gojq/cmd/gojq - GOTMPL = $(TOOLS)/gotmpl $(GOTMPL): PACKAGE=go.opentelemetry.io/build-tools/gotmpl @@ -67,7 +64,7 @@ GOVULNCHECK = $(TOOLS)/govulncheck $(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck .PHONY: tools -tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) +tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) # Virtualized python tools via docker diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md index 9a65707038c..efec278905b 100644 --- a/vendor/go.opentelemetry.io/otel/README.md +++ b/vendor/go.opentelemetry.io/otel/README.md @@ -89,8 +89,8 @@ If you need to extend the telemetry an instrumentation library provides or want to build your own instrumentation for your application directly you will need to use the [Go otel](https://pkg.go.dev/go.opentelemetry.io/otel) -package. The included [examples](./example/) are a good way to see some -practical uses of this process. +package. The [examples](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples) +are a good way to see some practical uses of this process. ### Export diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md index 59992984d42..ffa9b61258a 100644 --- a/vendor/go.opentelemetry.io/otel/RELEASING.md +++ b/vendor/go.opentelemetry.io/otel/RELEASING.md @@ -111,17 +111,6 @@ It is critical you make sure the version you push upstream is correct. Finally create a Release for the new `` on GitHub. The release body should include all the release notes from the Changelog for this release. -## Verify Examples - -After releasing verify that examples build outside of the repository. - -``` -./verify_examples.sh -``` - -The script copies examples into a different directory removes any `replace` declarations in `go.mod` and builds them. -This ensures they build with the published release, not the local copy. 
- ## Post-Release ### Contrib Repository diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go index bff9c7fdbb9..6cbefceadfe 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/set.go +++ b/vendor/go.opentelemetry.io/otel/attribute/set.go @@ -347,45 +347,25 @@ func computeDistinct(kvs []KeyValue) Distinct { func computeDistinctFixed(kvs []KeyValue) interface{} { switch len(kvs) { case 1: - ptr := new([1]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [1]KeyValue(kvs) case 2: - ptr := new([2]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [2]KeyValue(kvs) case 3: - ptr := new([3]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [3]KeyValue(kvs) case 4: - ptr := new([4]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [4]KeyValue(kvs) case 5: - ptr := new([5]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [5]KeyValue(kvs) case 6: - ptr := new([6]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [6]KeyValue(kvs) case 7: - ptr := new([7]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [7]KeyValue(kvs) case 8: - ptr := new([8]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [8]KeyValue(kvs) case 9: - ptr := new([9]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [9]KeyValue(kvs) case 10: - ptr := new([10]KeyValue) - copy((*ptr)[:], kvs) - return *ptr + return [10]KeyValue(kvs) default: return nil } diff --git a/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/global/meter.go index f2fc3929b11..e3db438a09f 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/meter.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/meter.go @@ -152,14 +152,17 @@ func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption) return m.delegate.Int64Counter(name, options...) } - i := &siCounter{name: name, opts: options} cfg := metric.NewInt64CounterConfig(options...) id := instID{ name: name, - kind: reflect.TypeOf(i), + kind: reflect.TypeOf((*siCounter)(nil)), description: cfg.Description(), unit: cfg.Unit(), } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64Counter), nil + } + i := &siCounter{name: name, opts: options} m.instruments[id] = i return i, nil } @@ -172,14 +175,17 @@ func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCou return m.delegate.Int64UpDownCounter(name, options...) } - i := &siUpDownCounter{name: name, opts: options} cfg := metric.NewInt64UpDownCounterConfig(options...) id := instID{ name: name, - kind: reflect.TypeOf(i), + kind: reflect.TypeOf((*siUpDownCounter)(nil)), description: cfg.Description(), unit: cfg.Unit(), } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64UpDownCounter), nil + } + i := &siUpDownCounter{name: name, opts: options} m.instruments[id] = i return i, nil } @@ -192,14 +198,17 @@ func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOpti return m.delegate.Int64Histogram(name, options...) } - i := &siHistogram{name: name, opts: options} cfg := metric.NewInt64HistogramConfig(options...) 
id := instID{ name: name, - kind: reflect.TypeOf(i), + kind: reflect.TypeOf((*siHistogram)(nil)), description: cfg.Description(), unit: cfg.Unit(), } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64Histogram), nil + } + i := &siHistogram{name: name, opts: options} m.instruments[id] = i return i, nil } @@ -212,14 +221,17 @@ func (m *meter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (met return m.delegate.Int64Gauge(name, options...) } - i := &siGauge{name: name, opts: options} cfg := metric.NewInt64GaugeConfig(options...) id := instID{ name: name, - kind: reflect.TypeOf(i), + kind: reflect.TypeOf((*siGauge)(nil)), description: cfg.Description(), unit: cfg.Unit(), } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64Gauge), nil + } + i := &siGauge{name: name, opts: options} m.instruments[id] = i return i, nil } @@ -232,14 +244,17 @@ func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64Obser return m.delegate.Int64ObservableCounter(name, options...) } - i := &aiCounter{name: name, opts: options} cfg := metric.NewInt64ObservableCounterConfig(options...) id := instID{ name: name, - kind: reflect.TypeOf(i), + kind: reflect.TypeOf((*aiCounter)(nil)), description: cfg.Description(), unit: cfg.Unit(), } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64ObservableCounter), nil + } + i := &aiCounter{name: name, opts: options} m.instruments[id] = i return i, nil } @@ -252,14 +267,17 @@ func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int6 return m.delegate.Int64ObservableUpDownCounter(name, options...) } - i := &aiUpDownCounter{name: name, opts: options} cfg := metric.NewInt64ObservableUpDownCounterConfig(options...) id := instID{ name: name, - kind: reflect.TypeOf(i), + kind: reflect.TypeOf((*aiUpDownCounter)(nil)), description: cfg.Description(), unit: cfg.Unit(), } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64ObservableUpDownCounter), nil + } + i := &aiUpDownCounter{name: name, opts: options} m.instruments[id] = i return i, nil } @@ -272,14 +290,17 @@ func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64Observa return m.delegate.Int64ObservableGauge(name, options...) } - i := &aiGauge{name: name, opts: options} cfg := metric.NewInt64ObservableGaugeConfig(options...) id := instID{ name: name, - kind: reflect.TypeOf(i), + kind: reflect.TypeOf((*aiGauge)(nil)), description: cfg.Description(), unit: cfg.Unit(), } + if f, ok := m.instruments[id]; ok { + return f.(metric.Int64ObservableGauge), nil + } + i := &aiGauge{name: name, opts: options} m.instruments[id] = i return i, nil } @@ -292,14 +313,17 @@ func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOpti return m.delegate.Float64Counter(name, options...) } - i := &sfCounter{name: name, opts: options} cfg := metric.NewFloat64CounterConfig(options...) id := instID{ name: name, - kind: reflect.TypeOf(i), + kind: reflect.TypeOf((*sfCounter)(nil)), description: cfg.Description(), unit: cfg.Unit(), } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64Counter), nil + } + i := &sfCounter{name: name, opts: options} m.instruments[id] = i return i, nil } @@ -312,14 +336,17 @@ func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDow return m.delegate.Float64UpDownCounter(name, options...) } - i := &sfUpDownCounter{name: name, opts: options} cfg := metric.NewFloat64UpDownCounterConfig(options...) 
id := instID{ name: name, - kind: reflect.TypeOf(i), + kind: reflect.TypeOf((*sfUpDownCounter)(nil)), description: cfg.Description(), unit: cfg.Unit(), } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64UpDownCounter), nil + } + i := &sfUpDownCounter{name: name, opts: options} m.instruments[id] = i return i, nil } @@ -332,14 +359,17 @@ func (m *meter) Float64Histogram(name string, options ...metric.Float64Histogram return m.delegate.Float64Histogram(name, options...) } - i := &sfHistogram{name: name, opts: options} cfg := metric.NewFloat64HistogramConfig(options...) id := instID{ name: name, - kind: reflect.TypeOf(i), + kind: reflect.TypeOf((*sfHistogram)(nil)), description: cfg.Description(), unit: cfg.Unit(), } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64Histogram), nil + } + i := &sfHistogram{name: name, opts: options} m.instruments[id] = i return i, nil } @@ -352,14 +382,17 @@ func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption) return m.delegate.Float64Gauge(name, options...) } - i := &sfGauge{name: name, opts: options} cfg := metric.NewFloat64GaugeConfig(options...) id := instID{ name: name, - kind: reflect.TypeOf(i), + kind: reflect.TypeOf((*sfGauge)(nil)), description: cfg.Description(), unit: cfg.Unit(), } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64Gauge), nil + } + i := &sfGauge{name: name, opts: options} m.instruments[id] = i return i, nil } @@ -372,14 +405,17 @@ func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64O return m.delegate.Float64ObservableCounter(name, options...) } - i := &afCounter{name: name, opts: options} cfg := metric.NewFloat64ObservableCounterConfig(options...) id := instID{ name: name, - kind: reflect.TypeOf(i), + kind: reflect.TypeOf((*afCounter)(nil)), description: cfg.Description(), unit: cfg.Unit(), } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64ObservableCounter), nil + } + i := &afCounter{name: name, opts: options} m.instruments[id] = i return i, nil } @@ -392,14 +428,17 @@ func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Fl return m.delegate.Float64ObservableUpDownCounter(name, options...) } - i := &afUpDownCounter{name: name, opts: options} cfg := metric.NewFloat64ObservableUpDownCounterConfig(options...) id := instID{ name: name, - kind: reflect.TypeOf(i), + kind: reflect.TypeOf((*afUpDownCounter)(nil)), description: cfg.Description(), unit: cfg.Unit(), } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64ObservableUpDownCounter), nil + } + i := &afUpDownCounter{name: name, opts: options} m.instruments[id] = i return i, nil } @@ -412,14 +451,17 @@ func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64Obs return m.delegate.Float64ObservableGauge(name, options...) } - i := &afGauge{name: name, opts: options} cfg := metric.NewFloat64ObservableGaugeConfig(options...) id := instID{ name: name, - kind: reflect.TypeOf(i), + kind: reflect.TypeOf((*afGauge)(nil)), description: cfg.Description(), unit: cfg.Unit(), } + if f, ok := m.instruments[id]; ok { + return f.(metric.Float64ObservableGauge), nil + } + i := &afGauge{name: name, opts: options} m.instruments[id] = i return i, nil } @@ -487,6 +529,7 @@ func (c *registration) setDelegate(m metric.Meter) { reg, err := m.RegisterCallback(c.function, insts...) 
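Editor's note on the `internal/global/meter.go` hunks above: instrument creation on the not-yet-delegated global meter is now de-duplicated by `instID`, part of the delegation fix (#5827) called out in the changelog. A small sketch of the behaviour this is assumed to give:

```go
// Sketch of the assumed behaviour after this change: before a real
// MeterProvider is installed, the global meter caches instruments by
// (name, kind, description, unit), so identical requests return one instrument.
package main

import (
	"fmt"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	m := otel.Meter("example")

	c1, _ := m.Int64Counter("requests", metric.WithDescription("total requests"))
	c2, _ := m.Int64Counter("requests", metric.WithDescription("total requests"))

	// Previously these were distinct placeholder instruments; now they are
	// expected to be the same one, so later delegation registers it once.
	fmt.Println(c1 == c2)
}
```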
if err != nil { GetErrorHandler().Handle(err) + return } c.unreg = reg.Unregister diff --git a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go index 9b1da2c02b9..b2fe3e41d3b 100644 --- a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go +++ b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go @@ -20,7 +20,8 @@ func RawToBool(r uint64) bool { } func Int64ToRaw(i int64) uint64 { - return uint64(i) + // Assumes original was a valid int64 (overflow not checked). + return uint64(i) // nolint: gosec } func RawToInt64(r uint64) int64 { diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument.go b/vendor/go.opentelemetry.io/otel/metric/instrument.go index ea52e402331..a535782e1d9 100644 --- a/vendor/go.opentelemetry.io/otel/metric/instrument.go +++ b/vendor/go.opentelemetry.io/otel/metric/instrument.go @@ -351,7 +351,7 @@ func WithAttributeSet(attributes attribute.Set) MeasurementOption { // // cp := make([]attribute.KeyValue, len(attributes)) // copy(cp, attributes) -// WithAttributes(attribute.NewSet(cp...)) +// WithAttributeSet(attribute.NewSet(cp...)) // // [attribute.NewSet] may modify the passed attributes so this will make a copy // of attributes before creating a set in order to ensure this function is diff --git a/vendor/go.opentelemetry.io/otel/renovate.json b/vendor/go.opentelemetry.io/otel/renovate.json index 4d36b98cf48..0a29a2f13d8 100644 --- a/vendor/go.opentelemetry.io/otel/renovate.json +++ b/vendor/go.opentelemetry.io/otel/renovate.json @@ -23,6 +23,10 @@ { "matchPackageNames": ["google.golang.org/genproto/googleapis/**"], "groupName": "googleapis" + }, + { + "matchPackageNames": ["golang.org/x/**"], + "groupName": "golang.org/x" } ] } diff --git a/vendor/go.opentelemetry.io/otel/verify_examples.sh b/vendor/go.opentelemetry.io/otel/verify_examples.sh deleted file mode 100644 index e57bf57fce8..00000000000 --- a/vendor/go.opentelemetry.io/otel/verify_examples.sh +++ /dev/null @@ -1,74 +0,0 @@ -#!/bin/bash - -# Copyright The OpenTelemetry Authors -# SPDX-License-Identifier: Apache-2.0 - -set -euo pipefail - -cd $(dirname $0) -TOOLS_DIR=$(pwd)/.tools - -if [ -z "${GOPATH}" ] ; then - printf "GOPATH is not defined.\n" - exit -1 -fi - -if [ ! -d "${GOPATH}" ] ; then - printf "GOPATH ${GOPATH} is invalid \n" - exit -1 -fi - -# Pre-requisites -if ! git diff --quiet; then \ - git status - printf "\n\nError: working tree is not clean\n" - exit -1 -fi - -if [ "$(git tag --contains $(git log -1 --pretty=format:"%H"))" = "" ] ; then - printf "$(git log -1)" - printf "\n\nError: HEAD is not pointing to a tagged version" -fi - -make ${TOOLS_DIR}/gojq - -DIR_TMP="${GOPATH}/src/oteltmp/" -rm -rf $DIR_TMP -mkdir -p $DIR_TMP - -printf "Copy examples to ${DIR_TMP}\n" -cp -a ./example ${DIR_TMP} - -# Update go.mod files -printf "Update go.mod: rename module and remove replace\n" - -PACKAGE_DIRS=$(find . 
-mindepth 2 -type f -name 'go.mod' -exec dirname {} \; | egrep 'example' | sed 's/^\.\///' | sort) - -for dir in $PACKAGE_DIRS; do - printf " Update go.mod for $dir\n" - (cd "${DIR_TMP}/${dir}" && \ - # replaces is ("mod1" "mod2" …) - replaces=($(go mod edit -json | ${TOOLS_DIR}/gojq '.Replace[].Old.Path')) && \ - # strip double quotes - replaces=("${replaces[@]%\"}") && \ - replaces=("${replaces[@]#\"}") && \ - # make an array (-dropreplace=mod1 -dropreplace=mod2 …) - dropreplaces=("${replaces[@]/#/-dropreplace=}") && \ - go mod edit -module "oteltmp/${dir}" "${dropreplaces[@]}" && \ - go mod tidy) -done -printf "Update done:\n\n" - -# Build directories that contain main package. These directories are different than -# directories that contain go.mod files. -printf "Build examples:\n" -EXAMPLES=$(./get_main_pkgs.sh ./example) -for ex in $EXAMPLES; do - printf " Build $ex in ${DIR_TMP}/${ex}\n" - (cd "${DIR_TMP}/${ex}" && \ - go build .) -done - -# Cleanup -printf "Remove copied files.\n" -rm -rf $DIR_TMP diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go index 78b40f3ed24..6d3c7b1f40e 100644 --- a/vendor/go.opentelemetry.io/otel/version.go +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. func Version() string { - return "1.30.0" + return "1.31.0" } diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml index 0c32f4fc46e..cdebdb5eb78 100644 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -3,7 +3,7 @@ module-sets: stable-v1: - version: v1.30.0 + version: v1.31.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opencensus @@ -29,12 +29,12 @@ module-sets: - go.opentelemetry.io/otel/sdk/metric - go.opentelemetry.io/otel/trace experimental-metrics: - version: v0.52.0 + version: v0.53.0 modules: - go.opentelemetry.io/otel/example/prometheus - go.opentelemetry.io/otel/exporters/prometheus experimental-logs: - version: v0.6.0 + version: v0.7.0 modules: - go.opentelemetry.io/otel/log - go.opentelemetry.io/otel/sdk/log @@ -42,7 +42,7 @@ module-sets: - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp - go.opentelemetry.io/otel/exporters/stdout/stdoutlog experimental-schema: - version: v0.0.9 + version: v0.0.10 modules: - go.opentelemetry.io/otel/schema excluded-modules: diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go index 5bbb3321748..109997d77ce 100644 --- a/vendor/golang.org/x/oauth2/token.go +++ b/vendor/golang.org/x/oauth2/token.go @@ -49,6 +49,13 @@ type Token struct { // mechanisms for that TokenSource will not be used. Expiry time.Time `json:"expiry,omitempty"` + // ExpiresIn is the OAuth2 wire format "expires_in" field, + // which specifies how many seconds later the token expires, + // relative to an unknown time base approximately around "now". + // It is the application's responsibility to populate + // `Expiry` from `ExpiresIn` when required. + ExpiresIn int64 `json:"expires_in,omitempty"` + // raw optionally contains extra metadata from the server // when updating a token. 
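The `ExpiresIn` field added above carries the raw `expires_in` seconds from the wire, and its comment leaves deriving an absolute `Expiry` to the application. A hedged helper along these lines (the function and call site are invented; only the two `Token` fields come from the diff):

```go
// Hypothetical helper: derive Token.Expiry from the new ExpiresIn field.
// Only the Expiry and ExpiresIn struct fields are taken from the diff;
// everything else is illustrative.
package main

import (
	"time"

	"golang.org/x/oauth2"
)

func setExpiryFromExpiresIn(tok *oauth2.Token, now time.Time) {
	if tok.Expiry.IsZero() && tok.ExpiresIn > 0 {
		tok.Expiry = now.Add(time.Duration(tok.ExpiresIn) * time.Second)
	}
}

func main() {
	tok := &oauth2.Token{AccessToken: "example", ExpiresIn: 3600}
	setExpiryFromExpiresIn(tok, time.Now())
	_ = tok.Expiry // roughly one hour from now
}
```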
raw interface{} diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go index 36e075d21fd..df50575b90a 100644 --- a/vendor/google.golang.org/api/internal/version.go +++ b/vendor/google.golang.org/api/internal/version.go @@ -5,4 +5,4 @@ package internal // Version is the current tagged release of the library. -const Version = "0.196.0" +const Version = "0.199.0" diff --git a/vendor/google.golang.org/api/storage/v1/storage-api.json b/vendor/google.golang.org/api/storage/v1/storage-api.json index 597daf0a724..6cd48d59733 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-api.json +++ b/vendor/google.golang.org/api/storage/v1/storage-api.json @@ -32,6 +32,11 @@ "endpointUrl": "https://storage.europe-west3.rep.googleapis.com/", "location": "europe-west3" }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.europe-west8.rep.googleapis.com/", + "location": "europe-west8" + }, { "description": "Regional Endpoint", "endpointUrl": "https://storage.europe-west9.rep.googleapis.com/", @@ -41,9 +46,54 @@ "description": "Regional Endpoint", "endpointUrl": "https://storage.me-central2.rep.googleapis.com/", "location": "me-central2" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.us-central1.rep.googleapis.com/", + "location": "us-central1" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.us-east1.rep.googleapis.com/", + "location": "us-east1" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.us-east4.rep.googleapis.com/", + "location": "us-east4" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.us-east5.rep.googleapis.com/", + "location": "us-east5" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.us-south1.rep.googleapis.com/", + "location": "us-south1" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.us-west1.rep.googleapis.com/", + "location": "us-west1" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.us-west2.rep.googleapis.com/", + "location": "us-west2" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.us-west3.rep.googleapis.com/", + "location": "us-west3" + }, + { + "description": "Regional Endpoint", + "endpointUrl": "https://storage.us-west4.rep.googleapis.com/", + "location": "us-west4" } ], - "etag": "\"38363036373236373330353534313035333932\"", + "etag": "\"38303531353337323634333935393838333734\"", "icons": { "x16": "https://www.google.com/images/icons/product/cloud_storage-16.png", "x32": "https://www.google.com/images/icons/product/cloud_storage-32.png" @@ -2829,6 +2879,11 @@ "location": "query", "type": "string" }, + "restoreToken": { + "description": "Restore token used to differentiate soft-deleted objects with the same name and generation. Only applicable for hierarchical namespace buckets and if softDeleted is set to true. This parameter is optional, and is only required in the rare case when there are multiple soft-deleted objects with the same name and generation.", + "location": "query", + "type": "string" + }, "softDeleted": { "description": "If true, only soft-deleted object versions will be listed. The default is false. 
For more information, see [Soft Delete](https://cloud.google.com/storage/docs/soft-delete).", "location": "query", @@ -3304,6 +3359,11 @@ "location": "query", "type": "string" }, + "restoreToken": { + "description": "Restore token used to differentiate sof-deleted objects with the same name and generation. Only applicable for hierarchical namespace buckets. This parameter is optional, and is only required in the rare case when there are multiple soft-deleted objects with the same name and generation.", + "location": "query", + "type": "string" + }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -4136,7 +4196,7 @@ } } }, - "revision": "20240819", + "revision": "20240916", "rootUrl": "https://storage.googleapis.com/", "schemas": { "AnywhereCache": { @@ -5597,6 +5657,10 @@ }, "type": "object" }, + "restoreToken": { + "description": "Restore token used to differentiate deleted objects with the same name and generation. This field is only returned for deleted objects in hierarchical namespace buckets.", + "type": "string" + }, "retention": { "description": "A collection of object level retention parameters.", "properties": { diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go index b16e3f227b2..c7c9321765e 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-gen.go +++ b/vendor/google.golang.org/api/storage/v1/storage-gen.go @@ -2236,6 +2236,10 @@ type Object struct { // Owner: The owner of the object. This will always be the uploader of the // object. Owner *ObjectOwner `json:"owner,omitempty"` + // RestoreToken: Restore token used to differentiate deleted objects with the + // same name and generation. This field is only returned for deleted objects in + // hierarchical namespace buckets. + RestoreToken string `json:"restoreToken,omitempty"` // Retention: A collection of object level retention parameters. Retention *ObjectRetention `json:"retention,omitempty"` // RetentionExpirationTime: A server-determined value that specifies the @@ -9961,6 +9965,17 @@ func (c *ObjectsGetCall) Projection(projection string) *ObjectsGetCall { return c } +// RestoreToken sets the optional parameter "restoreToken": Restore token used +// to differentiate soft-deleted objects with the same name and generation. +// Only applicable for hierarchical namespace buckets and if softDeleted is set +// to true. This parameter is optional, and is only required in the rare case +// when there are multiple soft-deleted objects with the same name and +// generation. +func (c *ObjectsGetCall) RestoreToken(restoreToken string) *ObjectsGetCall { + c.urlParams_.Set("restoreToken", restoreToken) + return c +} + // SoftDeleted sets the optional parameter "softDeleted": If true, only // soft-deleted object versions will be listed. The default is false. For more // information, see Soft Delete @@ -11056,6 +11071,16 @@ func (c *ObjectsRestoreCall) Projection(projection string) *ObjectsRestoreCall { return c } +// RestoreToken sets the optional parameter "restoreToken": Restore token used +// to differentiate sof-deleted objects with the same name and generation. Only +// applicable for hierarchical namespace buckets. This parameter is optional, +// and is only required in the rare case when there are multiple soft-deleted +// objects with the same name and generation. 
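Hypothetical use of the new `restoreToken` parameter on the generated storage client. The bucket, object, generation and token values are placeholders; the `SoftDeleted` and `RestoreToken` setters and the `Object.RestoreToken` field come from the hunks here, while `Generation` is the long-standing generated setter and is assumed.

```go
// Hedged sketch: fetch metadata for one specific soft-deleted copy in a
// hierarchical namespace bucket, disambiguated by restore token.
package main

import (
	"context"
	"log"

	storage "google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()
	svc, err := storage.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}

	obj, err := svc.Objects.Get("example-bucket", "example-object").
		Generation(1234567890).
		SoftDeleted(true).
		// Only needed when several soft-deleted copies share name+generation.
		RestoreToken("example-restore-token").
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("restoreToken on the returned object: %s", obj.RestoreToken)
}
```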
+func (c *ObjectsRestoreCall) RestoreToken(restoreToken string) *ObjectsRestoreCall { + c.urlParams_.Set("restoreToken", restoreToken) + return c +} + // UserProject sets the optional parameter "userProject": The project to be // billed for this request. Required for Requester Pays buckets. func (c *ObjectsRestoreCall) UserProject(userProject string) *ObjectsRestoreCall { diff --git a/vendor/google.golang.org/api/transport/grpc/dial.go b/vendor/google.golang.org/api/transport/grpc/dial.go index d2a4f76645a..ff3539d898f 100644 --- a/vendor/google.golang.org/api/transport/grpc/dial.go +++ b/vendor/google.golang.org/api/transport/grpc/dial.go @@ -247,6 +247,7 @@ func dialPoolNewAuth(ctx context.Context, secure bool, poolSize int, ds *interna DefaultScopes: ds.DefaultScopes, SkipValidation: skipValidation, }, + UniverseDomain: ds.UniverseDomain, }) return pool, err } diff --git a/vendor/google.golang.org/api/transport/http/dial.go b/vendor/google.golang.org/api/transport/http/dial.go index 2e2b15c6e0c..d5b213e0f08 100644 --- a/vendor/google.golang.org/api/transport/http/dial.go +++ b/vendor/google.golang.org/api/transport/http/dial.go @@ -130,6 +130,7 @@ func newClientNewAuth(ctx context.Context, base http.RoundTripper, ds *internal. DefaultScopes: ds.DefaultScopes, SkipValidation: skipValidation, }, + UniverseDomain: ds.UniverseDomain, }) if err != nil { return nil, err diff --git a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go index 3674914f701..4fe0c5eb250 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go +++ b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go @@ -17,6 +17,7 @@ limitations under the License. package runtime import ( + "context" "fmt" "net/http" "runtime" @@ -35,7 +36,7 @@ var ( ) // PanicHandlers is a list of functions which will be invoked when a panic happens. -var PanicHandlers = []func(interface{}){logPanic} +var PanicHandlers = []func(context.Context, interface{}){logPanic} // HandleCrash simply catches a crash and logs an error. Meant to be called via // defer. Additional context-specific handlers can be provided, and will be @@ -43,23 +44,54 @@ var PanicHandlers = []func(interface{}){logPanic} // handlers and logging the panic message. // // E.g., you can provide one or more additional handlers for something like shutting down go routines gracefully. +// +// TODO(pohly): logcheck:context // HandleCrashWithContext should be used instead of HandleCrash in code which supports contextual logging. func HandleCrash(additionalHandlers ...func(interface{})) { if r := recover(); r != nil { - for _, fn := range PanicHandlers { - fn(r) - } - for _, fn := range additionalHandlers { - fn(r) - } - if ReallyCrash { - // Actually proceed to panic. - panic(r) + additionalHandlersWithContext := make([]func(context.Context, interface{}), len(additionalHandlers)) + for i, handler := range additionalHandlers { + handler := handler // capture loop variable + additionalHandlersWithContext[i] = func(_ context.Context, r interface{}) { + handler(r) + } } + + handleCrash(context.Background(), r, additionalHandlersWithContext...) + } +} + +// HandleCrashWithContext simply catches a crash and logs an error. Meant to be called via +// defer. Additional context-specific handlers can be provided, and will be +// called in case of panic. HandleCrash actually crashes, after calling the +// handlers and logging the panic message. 
+// +// E.g., you can provide one or more additional handlers for something like shutting down go routines gracefully. +// +// The context is used to determine how to log. +func HandleCrashWithContext(ctx context.Context, additionalHandlers ...func(context.Context, interface{})) { + if r := recover(); r != nil { + handleCrash(ctx, r, additionalHandlers...) + } +} + +// handleCrash is the common implementation of HandleCrash and HandleCrash. +// Having those call a common implementation ensures that the stack depth +// is the same regardless through which path the handlers get invoked. +func handleCrash(ctx context.Context, r any, additionalHandlers ...func(context.Context, interface{})) { + for _, fn := range PanicHandlers { + fn(ctx, r) + } + for _, fn := range additionalHandlers { + fn(ctx, r) + } + if ReallyCrash { + // Actually proceed to panic. + panic(r) } } // logPanic logs the caller tree when a panic occurs (except in the special case of http.ErrAbortHandler). -func logPanic(r interface{}) { +func logPanic(ctx context.Context, r interface{}) { if r == http.ErrAbortHandler { // honor the http.ErrAbortHandler sentinel panic value: // ErrAbortHandler is a sentinel panic value to abort a handler. @@ -73,10 +105,20 @@ func logPanic(r interface{}) { const size = 64 << 10 stacktrace := make([]byte, size) stacktrace = stacktrace[:runtime.Stack(stacktrace, false)] + + // We don't really know how many call frames to skip because the Go + // panic handler is between us and the code where the panic occurred. + // If it's one function (as in Go 1.21), then skipping four levels + // gets us to the function which called the `defer HandleCrashWithontext(...)`. + logger := klog.FromContext(ctx).WithCallDepth(4) + + // For backwards compatibility, conversion to string + // is handled here instead of defering to the logging + // backend. if _, ok := r.(string); ok { - klog.Errorf("Observed a panic: %s\n%s", r, stacktrace) + logger.Error(nil, "Observed a panic", "panic", r, "stacktrace", string(stacktrace)) } else { - klog.Errorf("Observed a panic: %#v (%v)\n%s", r, r, stacktrace) + logger.Error(nil, "Observed a panic", "panic", fmt.Sprintf("%v", r), "panicGoValue", fmt.Sprintf("%#v", r), "stacktrace", string(stacktrace)) } } @@ -84,35 +126,76 @@ func logPanic(r interface{}) { // error occurs. // TODO(lavalamp): for testability, this and the below HandleError function // should be packaged up into a testable and reusable object. -var ErrorHandlers = []func(error){ +var ErrorHandlers = []ErrorHandler{ logError, - (&rudimentaryErrorBackoff{ - lastErrorTime: time.Now(), - // 1ms was the number folks were able to stomach as a global rate limit. - // If you need to log errors more than 1000 times a second you - // should probably consider fixing your code instead. :) - minPeriod: time.Millisecond, - }).OnError, + func(_ context.Context, _ error, _ string, _ ...interface{}) { + (&rudimentaryErrorBackoff{ + lastErrorTime: time.Now(), + // 1ms was the number folks were able to stomach as a global rate limit. + // If you need to log errors more than 1000 times a second you + // should probably consider fixing your code instead. :) + minPeriod: time.Millisecond, + }).OnError() + }, } +type ErrorHandler func(ctx context.Context, err error, msg string, keysAndValues ...interface{}) + // HandlerError is a method to invoke when a non-user facing piece of code cannot // return an error and needs to indicate it has been ignored. 
Invoking this method // is preferable to logging the error - the default behavior is to log but the // errors may be sent to a remote server for analysis. +// +// TODO(pohly): logcheck:context // HandleErrorWithContext should be used instead of HandleError in code which supports contextual logging. func HandleError(err error) { // this is sometimes called with a nil error. We probably shouldn't fail and should do nothing instead if err == nil { return } + handleError(context.Background(), err, "Unhandled Error") +} + +// HandlerErrorWithContext is a method to invoke when a non-user facing piece of code cannot +// return an error and needs to indicate it has been ignored. Invoking this method +// is preferable to logging the error - the default behavior is to log but the +// errors may be sent to a remote server for analysis. The context is used to +// determine how to log the error. +// +// If contextual logging is enabled, the default log output is equivalent to +// +// logr.FromContext(ctx).WithName("UnhandledError").Error(err, msg, keysAndValues...) +// +// Without contextual logging, it is equivalent to: +// +// klog.ErrorS(err, msg, keysAndValues...) +// +// In contrast to HandleError, passing nil for the error is still going to +// trigger a log entry. Don't construct a new error or wrap an error +// with fmt.Errorf. Instead, add additional information via the mssage +// and key/value pairs. +// +// This variant should be used instead of HandleError because it supports +// structured, contextual logging. +func HandleErrorWithContext(ctx context.Context, err error, msg string, keysAndValues ...interface{}) { + handleError(ctx, err, msg, keysAndValues...) +} + +// handleError is the common implementation of HandleError and HandleErrorWithContext. +// Using this common implementation ensures that the stack depth +// is the same regardless through which path the handlers get invoked. +func handleError(ctx context.Context, err error, msg string, keysAndValues ...interface{}) { for _, fn := range ErrorHandlers { - fn(err) + fn(ctx, err, msg, keysAndValues...) } } -// logError prints an error with the call stack of the location it was reported -func logError(err error) { - klog.ErrorDepth(2, err) +// logError prints an error with the call stack of the location it was reported. +// It expects to be called as -> HandleError[WithContext] -> handleError -> logError. +func logError(ctx context.Context, err error, msg string, keysAndValues ...interface{}) { + logger := klog.FromContext(ctx).WithCallDepth(3) + logger = klog.LoggerWithName(logger, "UnhandledError") + logger.Error(err, msg, keysAndValues...) //nolint:logcheck // logcheck complains about unknown key/value pairs. } type rudimentaryErrorBackoff struct { @@ -125,7 +208,7 @@ type rudimentaryErrorBackoff struct { // OnError will block if it is called more often than the embedded period time. // This will prevent overly tight hot error loops. 
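The apimachinery hunks above add context-aware crash and error handlers. A short sketch of how calling code is expected to use them; `processItem` and `key` are placeholders, while the `utilruntime` calls are the ones introduced here:

```go
// Sketch of the new contextual variants added above.
package main

import (
	"context"
	"errors"

	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)

func processItem(key string) error { return errors.New("example failure") }

func handle(ctx context.Context, key string) {
	// Logs panics through the logger found in ctx instead of the global klog calls.
	defer utilruntime.HandleCrashWithContext(ctx)

	if err := processItem(key); err != nil {
		// Unlike HandleError, the message and key/value pairs are structured.
		utilruntime.HandleErrorWithContext(ctx, err, "failed to process item", "key", key)
	}
}

func main() {
	handle(context.Background(), "default/example")
}
```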
-func (r *rudimentaryErrorBackoff) OnError(error) { +func (r *rudimentaryErrorBackoff) OnError() { now := time.Now() // start the timer before acquiring the lock r.lastErrorTimeLock.Lock() d := now.Sub(r.lastErrorTime) diff --git a/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go b/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go index efda7c197fc..1f9567881c7 100644 --- a/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go +++ b/vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go @@ -24,49 +24,66 @@ import ( "golang.org/x/time/rate" ) -type RateLimiter interface { +// Deprecated: RateLimiter is deprecated, use TypedRateLimiter instead. +type RateLimiter TypedRateLimiter[any] + +type TypedRateLimiter[T comparable] interface { // When gets an item and gets to decide how long that item should wait - When(item interface{}) time.Duration + When(item T) time.Duration // Forget indicates that an item is finished being retried. Doesn't matter whether it's for failing // or for success, we'll stop tracking it - Forget(item interface{}) + Forget(item T) // NumRequeues returns back how many failures the item has had - NumRequeues(item interface{}) int + NumRequeues(item T) int } // DefaultControllerRateLimiter is a no-arg constructor for a default rate limiter for a workqueue. It has // both overall and per-item rate limiting. The overall is a token bucket and the per-item is exponential +// +// Deprecated: Use DefaultTypedControllerRateLimiter instead. func DefaultControllerRateLimiter() RateLimiter { - return NewMaxOfRateLimiter( - NewItemExponentialFailureRateLimiter(5*time.Millisecond, 1000*time.Second), + return DefaultTypedControllerRateLimiter[any]() +} + +// DefaultTypedControllerRateLimiter is a no-arg constructor for a default rate limiter for a workqueue. It has +// both overall and per-item rate limiting. The overall is a token bucket and the per-item is exponential +func DefaultTypedControllerRateLimiter[T comparable]() TypedRateLimiter[T] { + return NewTypedMaxOfRateLimiter( + NewTypedItemExponentialFailureRateLimiter[T](5*time.Millisecond, 1000*time.Second), // 10 qps, 100 bucket size. This is only for retry speed and its only the overall factor (not per item) - &BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 100)}, + &TypedBucketRateLimiter[T]{Limiter: rate.NewLimiter(rate.Limit(10), 100)}, ) } -// BucketRateLimiter adapts a standard bucket to the workqueue ratelimiter API -type BucketRateLimiter struct { +// Deprecated: BucketRateLimiter is deprecated, use TypedBucketRateLimiter instead. +type BucketRateLimiter = TypedBucketRateLimiter[any] + +// TypedBucketRateLimiter adapts a standard bucket to the workqueue ratelimiter API +type TypedBucketRateLimiter[T comparable] struct { *rate.Limiter } var _ RateLimiter = &BucketRateLimiter{} -func (r *BucketRateLimiter) When(item interface{}) time.Duration { +func (r *TypedBucketRateLimiter[T]) When(item T) time.Duration { return r.Limiter.Reserve().Delay() } -func (r *BucketRateLimiter) NumRequeues(item interface{}) int { +func (r *TypedBucketRateLimiter[T]) NumRequeues(item T) int { return 0 } -func (r *BucketRateLimiter) Forget(item interface{}) { +func (r *TypedBucketRateLimiter[T]) Forget(item T) { } -// ItemExponentialFailureRateLimiter does a simple baseDelay*2^ limit +// Deprecated: ItemExponentialFailureRateLimiter is deprecated, use TypedItemExponentialFailureRateLimiter instead. 
+type ItemExponentialFailureRateLimiter = TypedItemExponentialFailureRateLimiter[any] + +// TypedItemExponentialFailureRateLimiter does a simple baseDelay*2^ limit // dealing with max failures and expiration are up to the caller -type ItemExponentialFailureRateLimiter struct { +type TypedItemExponentialFailureRateLimiter[T comparable] struct { failuresLock sync.Mutex - failures map[interface{}]int + failures map[T]int baseDelay time.Duration maxDelay time.Duration @@ -74,19 +91,29 @@ type ItemExponentialFailureRateLimiter struct { var _ RateLimiter = &ItemExponentialFailureRateLimiter{} +// Deprecated: NewItemExponentialFailureRateLimiter is deprecated, use NewTypedItemExponentialFailureRateLimiter instead. func NewItemExponentialFailureRateLimiter(baseDelay time.Duration, maxDelay time.Duration) RateLimiter { - return &ItemExponentialFailureRateLimiter{ - failures: map[interface{}]int{}, + return NewTypedItemExponentialFailureRateLimiter[any](baseDelay, maxDelay) +} + +func NewTypedItemExponentialFailureRateLimiter[T comparable](baseDelay time.Duration, maxDelay time.Duration) TypedRateLimiter[T] { + return &TypedItemExponentialFailureRateLimiter[T]{ + failures: map[T]int{}, baseDelay: baseDelay, maxDelay: maxDelay, } } +// Deprecated: DefaultItemBasedRateLimiter is deprecated, use DefaultTypedItemBasedRateLimiter instead. func DefaultItemBasedRateLimiter() RateLimiter { - return NewItemExponentialFailureRateLimiter(time.Millisecond, 1000*time.Second) + return DefaultTypedItemBasedRateLimiter[any]() } -func (r *ItemExponentialFailureRateLimiter) When(item interface{}) time.Duration { +func DefaultTypedItemBasedRateLimiter[T comparable]() TypedRateLimiter[T] { + return NewTypedItemExponentialFailureRateLimiter[T](time.Millisecond, 1000*time.Second) +} + +func (r *TypedItemExponentialFailureRateLimiter[T]) When(item T) time.Duration { r.failuresLock.Lock() defer r.failuresLock.Unlock() @@ -107,14 +134,14 @@ func (r *ItemExponentialFailureRateLimiter) When(item interface{}) time.Duration return calculated } -func (r *ItemExponentialFailureRateLimiter) NumRequeues(item interface{}) int { +func (r *TypedItemExponentialFailureRateLimiter[T]) NumRequeues(item T) int { r.failuresLock.Lock() defer r.failuresLock.Unlock() return r.failures[item] } -func (r *ItemExponentialFailureRateLimiter) Forget(item interface{}) { +func (r *TypedItemExponentialFailureRateLimiter[T]) Forget(item T) { r.failuresLock.Lock() defer r.failuresLock.Unlock() @@ -122,9 +149,13 @@ func (r *ItemExponentialFailureRateLimiter) Forget(item interface{}) { } // ItemFastSlowRateLimiter does a quick retry for a certain number of attempts, then a slow retry after that -type ItemFastSlowRateLimiter struct { +// Deprecated: Use TypedItemFastSlowRateLimiter instead. +type ItemFastSlowRateLimiter = TypedItemFastSlowRateLimiter[any] + +// TypedItemFastSlowRateLimiter does a quick retry for a certain number of attempts, then a slow retry after that +type TypedItemFastSlowRateLimiter[T comparable] struct { failuresLock sync.Mutex - failures map[interface{}]int + failures map[T]int maxFastAttempts int fastDelay time.Duration @@ -133,16 +164,21 @@ type ItemFastSlowRateLimiter struct { var _ RateLimiter = &ItemFastSlowRateLimiter{} +// Deprecated: NewItemFastSlowRateLimiter is deprecated, use NewTypedItemFastSlowRateLimiter instead. 
func NewItemFastSlowRateLimiter(fastDelay, slowDelay time.Duration, maxFastAttempts int) RateLimiter { - return &ItemFastSlowRateLimiter{ - failures: map[interface{}]int{}, + return NewTypedItemFastSlowRateLimiter[any](fastDelay, slowDelay, maxFastAttempts) +} + +func NewTypedItemFastSlowRateLimiter[T comparable](fastDelay, slowDelay time.Duration, maxFastAttempts int) TypedRateLimiter[T] { + return &TypedItemFastSlowRateLimiter[T]{ + failures: map[T]int{}, fastDelay: fastDelay, slowDelay: slowDelay, maxFastAttempts: maxFastAttempts, } } -func (r *ItemFastSlowRateLimiter) When(item interface{}) time.Duration { +func (r *TypedItemFastSlowRateLimiter[T]) When(item T) time.Duration { r.failuresLock.Lock() defer r.failuresLock.Unlock() @@ -155,14 +191,14 @@ func (r *ItemFastSlowRateLimiter) When(item interface{}) time.Duration { return r.slowDelay } -func (r *ItemFastSlowRateLimiter) NumRequeues(item interface{}) int { +func (r *TypedItemFastSlowRateLimiter[T]) NumRequeues(item T) int { r.failuresLock.Lock() defer r.failuresLock.Unlock() return r.failures[item] } -func (r *ItemFastSlowRateLimiter) Forget(item interface{}) { +func (r *TypedItemFastSlowRateLimiter[T]) Forget(item T) { r.failuresLock.Lock() defer r.failuresLock.Unlock() @@ -172,11 +208,18 @@ func (r *ItemFastSlowRateLimiter) Forget(item interface{}) { // MaxOfRateLimiter calls every RateLimiter and returns the worst case response // When used with a token bucket limiter, the burst could be apparently exceeded in cases where particular items // were separately delayed a longer time. -type MaxOfRateLimiter struct { - limiters []RateLimiter +// +// Deprecated: Use TypedMaxOfRateLimiter instead. +type MaxOfRateLimiter = TypedMaxOfRateLimiter[any] + +// TypedMaxOfRateLimiter calls every RateLimiter and returns the worst case response +// When used with a token bucket limiter, the burst could be apparently exceeded in cases where particular items +// were separately delayed a longer time. +type TypedMaxOfRateLimiter[T comparable] struct { + limiters []TypedRateLimiter[T] } -func (r *MaxOfRateLimiter) When(item interface{}) time.Duration { +func (r *TypedMaxOfRateLimiter[T]) When(item T) time.Duration { ret := time.Duration(0) for _, limiter := range r.limiters { curr := limiter.When(item) @@ -188,11 +231,16 @@ func (r *MaxOfRateLimiter) When(item interface{}) time.Duration { return ret } -func NewMaxOfRateLimiter(limiters ...RateLimiter) RateLimiter { - return &MaxOfRateLimiter{limiters: limiters} +// Deprecated: NewMaxOfRateLimiter is deprecated, use NewTypedMaxOfRateLimiter instead. +func NewMaxOfRateLimiter(limiters ...TypedRateLimiter[any]) RateLimiter { + return NewTypedMaxOfRateLimiter(limiters...) } -func (r *MaxOfRateLimiter) NumRequeues(item interface{}) int { +func NewTypedMaxOfRateLimiter[T comparable](limiters ...TypedRateLimiter[T]) TypedRateLimiter[T] { + return &TypedMaxOfRateLimiter[T]{limiters: limiters} +} + +func (r *TypedMaxOfRateLimiter[T]) NumRequeues(item T) int { ret := 0 for _, limiter := range r.limiters { curr := limiter.NumRequeues(item) @@ -204,23 +252,32 @@ func (r *MaxOfRateLimiter) NumRequeues(item interface{}) int { return ret } -func (r *MaxOfRateLimiter) Forget(item interface{}) { +func (r *TypedMaxOfRateLimiter[T]) Forget(item T) { for _, limiter := range r.limiters { limiter.Forget(item) } } // WithMaxWaitRateLimiter have maxDelay which avoids waiting too long -type WithMaxWaitRateLimiter struct { - limiter RateLimiter +// Deprecated: Use TypedWithMaxWaitRateLimiter instead. 
+type WithMaxWaitRateLimiter = TypedWithMaxWaitRateLimiter[any] + +// TypedWithMaxWaitRateLimiter have maxDelay which avoids waiting too long +type TypedWithMaxWaitRateLimiter[T comparable] struct { + limiter TypedRateLimiter[T] maxDelay time.Duration } +// Deprecated: NewWithMaxWaitRateLimiter is deprecated, use NewTypedWithMaxWaitRateLimiter instead. func NewWithMaxWaitRateLimiter(limiter RateLimiter, maxDelay time.Duration) RateLimiter { - return &WithMaxWaitRateLimiter{limiter: limiter, maxDelay: maxDelay} + return NewTypedWithMaxWaitRateLimiter[any](limiter, maxDelay) +} + +func NewTypedWithMaxWaitRateLimiter[T comparable](limiter TypedRateLimiter[T], maxDelay time.Duration) TypedRateLimiter[T] { + return &TypedWithMaxWaitRateLimiter[T]{limiter: limiter, maxDelay: maxDelay} } -func (w WithMaxWaitRateLimiter) When(item interface{}) time.Duration { +func (w TypedWithMaxWaitRateLimiter[T]) When(item T) time.Duration { delay := w.limiter.When(item) if delay > w.maxDelay { return w.maxDelay @@ -229,10 +286,10 @@ func (w WithMaxWaitRateLimiter) When(item interface{}) time.Duration { return delay } -func (w WithMaxWaitRateLimiter) Forget(item interface{}) { +func (w TypedWithMaxWaitRateLimiter[T]) Forget(item T) { w.limiter.Forget(item) } -func (w WithMaxWaitRateLimiter) NumRequeues(item interface{}) int { +func (w TypedWithMaxWaitRateLimiter[T]) NumRequeues(item T) int { return w.limiter.NumRequeues(item) } diff --git a/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go b/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go index c1df7203021..958b96a80c3 100644 --- a/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go +++ b/vendor/k8s.io/client-go/util/workqueue/delaying_queue.go @@ -27,14 +27,25 @@ import ( // DelayingInterface is an Interface that can Add an item at a later time. This makes it easier to // requeue items after failures without ending up in a hot-loop. -type DelayingInterface interface { - Interface +// +// Deprecated: use TypedDelayingInterface instead. +type DelayingInterface TypedDelayingInterface[any] + +// TypedDelayingInterface is an Interface that can Add an item at a later time. This makes it easier to +// requeue items after failures without ending up in a hot-loop. +type TypedDelayingInterface[T comparable] interface { + TypedInterface[T] // AddAfter adds an item to the workqueue after the indicated duration has passed - AddAfter(item interface{}, duration time.Duration) + AddAfter(item T, duration time.Duration) } // DelayingQueueConfig specifies optional configurations to customize a DelayingInterface. -type DelayingQueueConfig struct { +// +// Deprecated: use TypedDelayingQueueConfig instead. +type DelayingQueueConfig = TypedDelayingQueueConfig[any] + +// TypedDelayingQueueConfig specifies optional configurations to customize a DelayingInterface. +type TypedDelayingQueueConfig[T comparable] struct { // Name for the queue. If unnamed, the metrics will not be registered. Name string @@ -46,25 +57,42 @@ type DelayingQueueConfig struct { Clock clock.WithTicker // Queue optionally allows injecting custom queue Interface instead of the default one. - Queue Interface + Queue TypedInterface[T] } // NewDelayingQueue constructs a new workqueue with delayed queuing ability. // NewDelayingQueue does not emit metrics. For use with a MetricsProvider, please use // NewDelayingQueueWithConfig instead and specify a name. +// +// Deprecated: use TypedNewDelayingQueue instead. 
func NewDelayingQueue() DelayingInterface { return NewDelayingQueueWithConfig(DelayingQueueConfig{}) } +// TypedNewDelayingQueue constructs a new workqueue with delayed queuing ability. +// TypedNewDelayingQueue does not emit metrics. For use with a MetricsProvider, please use +// TypedNewDelayingQueueWithConfig instead and specify a name. +func TypedNewDelayingQueue[T comparable]() TypedDelayingInterface[T] { + return NewTypedDelayingQueueWithConfig(TypedDelayingQueueConfig[T]{}) +} + // NewDelayingQueueWithConfig constructs a new workqueue with options to // customize different properties. +// +// Deprecated: use TypedNewDelayingQueueWithConfig instead. func NewDelayingQueueWithConfig(config DelayingQueueConfig) DelayingInterface { + return NewTypedDelayingQueueWithConfig[any](config) +} + +// NewTypedDelayingQueueWithConfig constructs a new workqueue with options to +// customize different properties. +func NewTypedDelayingQueueWithConfig[T comparable](config TypedDelayingQueueConfig[T]) TypedDelayingInterface[T] { if config.Clock == nil { config.Clock = clock.RealClock{} } if config.Queue == nil { - config.Queue = NewWithConfig(QueueConfig{ + config.Queue = NewTypedWithConfig[T](TypedQueueConfig[T]{ Name: config.Name, MetricsProvider: config.MetricsProvider, Clock: config.Clock, @@ -100,9 +128,9 @@ func NewDelayingQueueWithCustomClock(clock clock.WithTicker, name string) Delayi }) } -func newDelayingQueue(clock clock.WithTicker, q Interface, name string, provider MetricsProvider) *delayingType { - ret := &delayingType{ - Interface: q, +func newDelayingQueue[T comparable](clock clock.WithTicker, q TypedInterface[T], name string, provider MetricsProvider) *delayingType[T] { + ret := &delayingType[T]{ + TypedInterface: q, clock: clock, heartbeat: clock.NewTicker(maxWait), stopCh: make(chan struct{}), @@ -115,8 +143,8 @@ func newDelayingQueue(clock clock.WithTicker, q Interface, name string, provider } // delayingType wraps an Interface and provides delayed re-enquing -type delayingType struct { - Interface +type delayingType[T comparable] struct { + TypedInterface[T] // clock tracks time for delayed firing clock clock.Clock @@ -193,16 +221,16 @@ func (pq waitForPriorityQueue) Peek() interface{} { // ShutDown stops the queue. After the queue drains, the returned shutdown bool // on Get() will be true. This method may be invoked more than once. -func (q *delayingType) ShutDown() { +func (q *delayingType[T]) ShutDown() { q.stopOnce.Do(func() { - q.Interface.ShutDown() + q.TypedInterface.ShutDown() close(q.stopCh) q.heartbeat.Stop() }) } // AddAfter adds the given item to the work queue after the given delay -func (q *delayingType) AddAfter(item interface{}, duration time.Duration) { +func (q *delayingType[T]) AddAfter(item T, duration time.Duration) { // don't add if we're already shutting down if q.ShuttingDown() { return @@ -229,7 +257,7 @@ func (q *delayingType) AddAfter(item interface{}, duration time.Duration) { const maxWait = 10 * time.Second // waitingLoop runs until the workqueue is shutdown and keeps a check on the list of items to be added. 
-func (q *delayingType) waitingLoop() { +func (q *delayingType[T]) waitingLoop() { defer utilruntime.HandleCrash() // Make a placeholder channel to use when there are no items in our list @@ -244,7 +272,7 @@ func (q *delayingType) waitingLoop() { waitingEntryByData := map[t]*waitFor{} for { - if q.Interface.ShuttingDown() { + if q.TypedInterface.ShuttingDown() { return } @@ -258,7 +286,7 @@ func (q *delayingType) waitingLoop() { } entry = heap.Pop(waitingForQueue).(*waitFor) - q.Add(entry.data) + q.Add(entry.data.(T)) delete(waitingEntryByData, entry.data) } @@ -287,7 +315,7 @@ func (q *delayingType) waitingLoop() { if waitEntry.readyAt.After(q.clock.Now()) { insert(waitingForQueue, waitingEntryByData, waitEntry) } else { - q.Add(waitEntry.data) + q.Add(waitEntry.data.(T)) } drained := false @@ -297,7 +325,7 @@ func (q *delayingType) waitingLoop() { if waitEntry.readyAt.After(q.clock.Now()) { insert(waitingForQueue, waitingEntryByData, waitEntry) } else { - q.Add(waitEntry.data) + q.Add(waitEntry.data.(T)) } default: drained = true diff --git a/vendor/k8s.io/client-go/util/workqueue/queue.go b/vendor/k8s.io/client-go/util/workqueue/queue.go index a363d1afb4f..ff715482c11 100644 --- a/vendor/k8s.io/client-go/util/workqueue/queue.go +++ b/vendor/k8s.io/client-go/util/workqueue/queue.go @@ -23,18 +23,66 @@ import ( "k8s.io/utils/clock" ) -type Interface interface { - Add(item interface{}) +// Deprecated: Interface is deprecated, use TypedInterface instead. +type Interface TypedInterface[any] + +type TypedInterface[T comparable] interface { + Add(item T) Len() int - Get() (item interface{}, shutdown bool) - Done(item interface{}) + Get() (item T, shutdown bool) + Done(item T) ShutDown() ShutDownWithDrain() ShuttingDown() bool } +// Queue is the underlying storage for items. The functions below are always +// called from the same goroutine. +type Queue[T comparable] interface { + // Touch can be hooked when an existing item is added again. This may be + // useful if the implementation allows priority change for the given item. + Touch(item T) + // Push adds a new item. + Push(item T) + // Len tells the total number of items. + Len() int + // Pop retrieves an item. + Pop() (item T) +} + +// DefaultQueue is a slice based FIFO queue. +func DefaultQueue[T comparable]() Queue[T] { + return new(queue[T]) +} + +// queue is a slice which implements Queue. +type queue[T comparable] []T + +func (q *queue[T]) Touch(item T) {} + +func (q *queue[T]) Push(item T) { + *q = append(*q, item) +} + +func (q *queue[T]) Len() int { + return len(*q) +} + +func (q *queue[T]) Pop() (item T) { + item = (*q)[0] + + // The underlying array still exists and reference this object, so the object will not be garbage collected. + (*q)[0] = *new(T) + *q = (*q)[1:] + + return item +} + // QueueConfig specifies optional configurations to customize an Interface. -type QueueConfig struct { +// Deprecated: use TypedQueueConfig instead. +type QueueConfig = TypedQueueConfig[any] + +type TypedQueueConfig[T comparable] struct { // Name for the queue. If unnamed, the metrics will not be registered. Name string @@ -44,18 +92,38 @@ type QueueConfig struct { // Clock ability to inject real or fake clock for testing purposes. Clock clock.WithTicker + + // Queue provides the underlying queue to use. It is optional and defaults to slice based FIFO queue. + Queue Queue[T] } // New constructs a new work queue (see the package comment). +// +// Deprecated: use NewTyped instead. 
func New() *Type { return NewWithConfig(QueueConfig{ Name: "", }) } +// NewTyped constructs a new work queue (see the package comment). +func NewTyped[T comparable]() *Typed[T] { + return NewTypedWithConfig(TypedQueueConfig[T]{ + Name: "", + }) +} + // NewWithConfig constructs a new workqueue with ability to // customize different properties. +// +// Deprecated: use NewTypedWithConfig instead. func NewWithConfig(config QueueConfig) *Type { + return NewTypedWithConfig(config) +} + +// NewTypedWithConfig constructs a new workqueue with ability to +// customize different properties. +func NewTypedWithConfig[T comparable](config TypedQueueConfig[T]) *Typed[T] { return newQueueWithConfig(config, defaultUnfinishedWorkUpdatePeriod) } @@ -69,7 +137,7 @@ func NewNamed(name string) *Type { // newQueueWithConfig constructs a new named workqueue // with the ability to customize different properties for testing purposes -func newQueueWithConfig(config QueueConfig, updatePeriod time.Duration) *Type { +func newQueueWithConfig[T comparable](config TypedQueueConfig[T], updatePeriod time.Duration) *Typed[T] { var metricsFactory *queueMetricsFactory if config.MetricsProvider != nil { metricsFactory = &queueMetricsFactory{ @@ -83,18 +151,24 @@ func newQueueWithConfig(config QueueConfig, updatePeriod time.Duration) *Type { config.Clock = clock.RealClock{} } + if config.Queue == nil { + config.Queue = DefaultQueue[T]() + } + return newQueue( config.Clock, + config.Queue, metricsFactory.newQueueMetrics(config.Name, config.Clock), updatePeriod, ) } -func newQueue(c clock.WithTicker, metrics queueMetrics, updatePeriod time.Duration) *Type { - t := &Type{ +func newQueue[T comparable](c clock.WithTicker, queue Queue[T], metrics queueMetrics, updatePeriod time.Duration) *Typed[T] { + t := &Typed[T]{ clock: c, - dirty: set{}, - processing: set{}, + queue: queue, + dirty: set[T]{}, + processing: set[T]{}, cond: sync.NewCond(&sync.Mutex{}), metrics: metrics, unfinishedWorkUpdatePeriod: updatePeriod, @@ -112,20 +186,23 @@ func newQueue(c clock.WithTicker, metrics queueMetrics, updatePeriod time.Durati const defaultUnfinishedWorkUpdatePeriod = 500 * time.Millisecond // Type is a work queue (see the package comment). -type Type struct { +// Deprecated: Use Typed instead. +type Type = Typed[any] + +type Typed[t comparable] struct { // queue defines the order in which we will work on items. Every // element of queue should be in the dirty set and not in the // processing set. - queue []t + queue Queue[t] // dirty defines all of the items that need to be processed. - dirty set + dirty set[t] // Things that are currently being processed are in the processing set. // These things may be simultaneously in the dirty set. When we finish // processing something and remove it from this set, we'll check if // it's in the dirty set, and if so, add it to the queue. - processing set + processing set[t] cond *sync.Cond @@ -140,33 +217,38 @@ type Type struct { type empty struct{} type t interface{} -type set map[t]empty +type set[t comparable] map[t]empty -func (s set) has(item t) bool { +func (s set[t]) has(item t) bool { _, exists := s[item] return exists } -func (s set) insert(item t) { +func (s set[t]) insert(item t) { s[item] = empty{} } -func (s set) delete(item t) { +func (s set[t]) delete(item t) { delete(s, item) } -func (s set) len() int { +func (s set[t]) len() int { return len(s) } // Add marks item as needing processing. 
-func (q *Type) Add(item interface{}) { +func (q *Typed[T]) Add(item T) { q.cond.L.Lock() defer q.cond.L.Unlock() if q.shuttingDown { return } if q.dirty.has(item) { + // the same item is added again before it is processed, call the Touch + // function if the queue cares about it (for e.g, reset its priority) + if !q.processing.has(item) { + q.queue.Touch(item) + } return } @@ -177,37 +259,34 @@ func (q *Type) Add(item interface{}) { return } - q.queue = append(q.queue, item) + q.queue.Push(item) q.cond.Signal() } // Len returns the current queue length, for informational purposes only. You // shouldn't e.g. gate a call to Add() or Get() on Len() being a particular // value, that can't be synchronized properly. -func (q *Type) Len() int { +func (q *Typed[T]) Len() int { q.cond.L.Lock() defer q.cond.L.Unlock() - return len(q.queue) + return q.queue.Len() } // Get blocks until it can return an item to be processed. If shutdown = true, // the caller should end their goroutine. You must call Done with item when you // have finished processing it. -func (q *Type) Get() (item interface{}, shutdown bool) { +func (q *Typed[T]) Get() (item T, shutdown bool) { q.cond.L.Lock() defer q.cond.L.Unlock() - for len(q.queue) == 0 && !q.shuttingDown { + for q.queue.Len() == 0 && !q.shuttingDown { q.cond.Wait() } - if len(q.queue) == 0 { + if q.queue.Len() == 0 { // We must be shutting down. - return nil, true + return *new(T), true } - item = q.queue[0] - // The underlying array still exists and reference this object, so the object will not be garbage collected. - q.queue[0] = nil - q.queue = q.queue[1:] + item = q.queue.Pop() q.metrics.get(item) @@ -220,7 +299,7 @@ func (q *Type) Get() (item interface{}, shutdown bool) { // Done marks item as done processing, and if it has been marked as dirty again // while it was being processed, it will be re-added to the queue for // re-processing. -func (q *Type) Done(item interface{}) { +func (q *Typed[T]) Done(item T) { q.cond.L.Lock() defer q.cond.L.Unlock() @@ -228,7 +307,7 @@ func (q *Type) Done(item interface{}) { q.processing.delete(item) if q.dirty.has(item) { - q.queue = append(q.queue, item) + q.queue.Push(item) q.cond.Signal() } else if q.processing.len() == 0 { q.cond.Signal() @@ -237,7 +316,7 @@ func (q *Type) Done(item interface{}) { // ShutDown will cause q to ignore all new items added to it and // immediately instruct the worker goroutines to exit. -func (q *Type) ShutDown() { +func (q *Typed[T]) ShutDown() { q.cond.L.Lock() defer q.cond.L.Unlock() @@ -255,7 +334,7 @@ func (q *Type) ShutDown() { // indefinitely. It is, however, safe to call ShutDown after having called // ShutDownWithDrain, as to force the queue shut down to terminate immediately // without waiting for the drainage. 
-func (q *Type) ShutDownWithDrain() { +func (q *Typed[T]) ShutDownWithDrain() { q.cond.L.Lock() defer q.cond.L.Unlock() @@ -268,14 +347,14 @@ func (q *Type) ShutDownWithDrain() { } } -func (q *Type) ShuttingDown() bool { +func (q *Typed[T]) ShuttingDown() bool { q.cond.L.Lock() defer q.cond.L.Unlock() return q.shuttingDown } -func (q *Type) updateUnfinishedWorkLoop() { +func (q *Typed[T]) updateUnfinishedWorkLoop() { t := q.clock.NewTicker(q.unfinishedWorkUpdatePeriod) defer t.Stop() for range t.C() { diff --git a/vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go b/vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go index 3e4016fb04f..fe45afa5a42 100644 --- a/vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go +++ b/vendor/k8s.io/client-go/util/workqueue/rate_limiting_queue.go @@ -19,24 +19,33 @@ package workqueue import "k8s.io/utils/clock" // RateLimitingInterface is an interface that rate limits items being added to the queue. -type RateLimitingInterface interface { - DelayingInterface +// +// Deprecated: Use TypedRateLimitingInterface instead. +type RateLimitingInterface TypedRateLimitingInterface[any] + +// TypedRateLimitingInterface is an interface that rate limits items being added to the queue. +type TypedRateLimitingInterface[T comparable] interface { + TypedDelayingInterface[T] // AddRateLimited adds an item to the workqueue after the rate limiter says it's ok - AddRateLimited(item interface{}) + AddRateLimited(item T) // Forget indicates that an item is finished being retried. Doesn't matter whether it's for perm failing // or for success, we'll stop the rate limiter from tracking it. This only clears the `rateLimiter`, you // still have to call `Done` on the queue. - Forget(item interface{}) + Forget(item T) // NumRequeues returns back how many times the item was requeued - NumRequeues(item interface{}) int + NumRequeues(item T) int } // RateLimitingQueueConfig specifies optional configurations to customize a RateLimitingInterface. +// +// Deprecated: Use TypedRateLimitingQueueConfig instead. +type RateLimitingQueueConfig = TypedRateLimitingQueueConfig[any] -type RateLimitingQueueConfig struct { +// TypedRateLimitingQueueConfig specifies optional configurations to customize a TypedRateLimitingInterface. +type TypedRateLimitingQueueConfig[T comparable] struct { // Name for the queue. If unnamed, the metrics will not be registered. Name string @@ -48,36 +57,55 @@ type RateLimitingQueueConfig struct { Clock clock.WithTicker // DelayingQueue optionally allows injecting custom delaying queue DelayingInterface instead of the default one. - DelayingQueue DelayingInterface + DelayingQueue TypedDelayingInterface[T] } // NewRateLimitingQueue constructs a new workqueue with rateLimited queuing ability // Remember to call Forget! If you don't, you may end up tracking failures forever. // NewRateLimitingQueue does not emit metrics. For use with a MetricsProvider, please use // NewRateLimitingQueueWithConfig instead and specify a name. +// +// Deprecated: Use NewTypedRateLimitingQueue instead. func NewRateLimitingQueue(rateLimiter RateLimiter) RateLimitingInterface { return NewRateLimitingQueueWithConfig(rateLimiter, RateLimitingQueueConfig{}) } +// NewTypedRateLimitingQueue constructs a new workqueue with rateLimited queuing ability +// Remember to call Forget! If you don't, you may end up tracking failures forever. +// NewTypedRateLimitingQueue does not emit metrics. 
For use with a MetricsProvider, please use +// NewTypedRateLimitingQueueWithConfig instead and specify a name. +func NewTypedRateLimitingQueue[T comparable](rateLimiter TypedRateLimiter[T]) TypedRateLimitingInterface[T] { + return NewTypedRateLimitingQueueWithConfig(rateLimiter, TypedRateLimitingQueueConfig[T]{}) +} + // NewRateLimitingQueueWithConfig constructs a new workqueue with rateLimited queuing ability // with options to customize different properties. // Remember to call Forget! If you don't, you may end up tracking failures forever. +// +// Deprecated: Use NewTypedRateLimitingQueueWithConfig instead. func NewRateLimitingQueueWithConfig(rateLimiter RateLimiter, config RateLimitingQueueConfig) RateLimitingInterface { + return NewTypedRateLimitingQueueWithConfig(rateLimiter, config) +} + +// NewTypedRateLimitingQueueWithConfig constructs a new workqueue with rateLimited queuing ability +// with options to customize different properties. +// Remember to call Forget! If you don't, you may end up tracking failures forever. +func NewTypedRateLimitingQueueWithConfig[T comparable](rateLimiter TypedRateLimiter[T], config TypedRateLimitingQueueConfig[T]) TypedRateLimitingInterface[T] { if config.Clock == nil { config.Clock = clock.RealClock{} } if config.DelayingQueue == nil { - config.DelayingQueue = NewDelayingQueueWithConfig(DelayingQueueConfig{ + config.DelayingQueue = NewTypedDelayingQueueWithConfig(TypedDelayingQueueConfig[T]{ Name: config.Name, MetricsProvider: config.MetricsProvider, Clock: config.Clock, }) } - return &rateLimitingType{ - DelayingInterface: config.DelayingQueue, - rateLimiter: rateLimiter, + return &rateLimitingType[T]{ + TypedDelayingInterface: config.DelayingQueue, + rateLimiter: rateLimiter, } } @@ -99,21 +127,21 @@ func NewRateLimitingQueueWithDelayingInterface(di DelayingInterface, rateLimiter } // rateLimitingType wraps an Interface and provides rateLimited re-enquing -type rateLimitingType struct { - DelayingInterface +type rateLimitingType[T comparable] struct { + TypedDelayingInterface[T] - rateLimiter RateLimiter + rateLimiter TypedRateLimiter[T] } // AddRateLimited AddAfter's the item based on the time when the rate limiter says it's ok -func (q *rateLimitingType) AddRateLimited(item interface{}) { - q.DelayingInterface.AddAfter(item, q.rateLimiter.When(item)) +func (q *rateLimitingType[T]) AddRateLimited(item T) { + q.TypedDelayingInterface.AddAfter(item, q.rateLimiter.When(item)) } -func (q *rateLimitingType) NumRequeues(item interface{}) int { +func (q *rateLimitingType[T]) NumRequeues(item T) int { return q.rateLimiter.NumRequeues(item) } -func (q *rateLimitingType) Forget(item interface{}) { +func (q *rateLimitingType[T]) Forget(item T) { q.rateLimiter.Forget(item) } diff --git a/vendor/k8s.io/utils/net/multi_listen.go b/vendor/k8s.io/utils/net/multi_listen.go new file mode 100644 index 00000000000..7cb7795beca --- /dev/null +++ b/vendor/k8s.io/utils/net/multi_listen.go @@ -0,0 +1,195 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package net + +import ( + "context" + "fmt" + "net" + "sync" +) + +// connErrPair pairs conn and error which is returned by accept on sub-listeners. +type connErrPair struct { + conn net.Conn + err error +} + +// multiListener implements net.Listener +type multiListener struct { + listeners []net.Listener + wg sync.WaitGroup + + // connCh passes accepted connections, from child listeners to parent. + connCh chan connErrPair + // stopCh communicates from parent to child listeners. + stopCh chan struct{} +} + +// compile time check to ensure *multiListener implements net.Listener +var _ net.Listener = &multiListener{} + +// MultiListen returns net.Listener which can listen on and accept connections for +// the given network on multiple addresses. Internally it uses stdlib to create +// sub-listener and multiplexes connection requests using go-routines. +// The network must be "tcp", "tcp4" or "tcp6". +// It follows the semantics of net.Listen that primarily means: +// 1. If the host is an unspecified/zero IP address with "tcp" network, MultiListen +// listens on all available unicast and anycast IP addresses of the local system. +// 2. Use "tcp4" or "tcp6" to exclusively listen on IPv4 or IPv6 family, respectively. +// 3. The host can accept names (e.g, localhost) and it will create a listener for at +// most one of the host's IP. +func MultiListen(ctx context.Context, network string, addrs ...string) (net.Listener, error) { + var lc net.ListenConfig + return multiListen( + ctx, + network, + addrs, + func(ctx context.Context, network, address string) (net.Listener, error) { + return lc.Listen(ctx, network, address) + }) +} + +// multiListen implements MultiListen by consuming stdlib functions as dependency allowing +// mocking for unit-testing. +func multiListen( + ctx context.Context, + network string, + addrs []string, + listenFunc func(ctx context.Context, network, address string) (net.Listener, error), +) (net.Listener, error) { + if !(network == "tcp" || network == "tcp4" || network == "tcp6") { + return nil, fmt.Errorf("network %q not supported", network) + } + if len(addrs) == 0 { + return nil, fmt.Errorf("no address provided to listen on") + } + + ml := &multiListener{ + connCh: make(chan connErrPair), + stopCh: make(chan struct{}), + } + for _, addr := range addrs { + l, err := listenFunc(ctx, network, addr) + if err != nil { + // close all the sub-listeners and exit + _ = ml.Close() + return nil, err + } + ml.listeners = append(ml.listeners, l) + } + + for _, l := range ml.listeners { + ml.wg.Add(1) + go func(l net.Listener) { + defer ml.wg.Done() + for { + // Accept() is blocking, unless ml.Close() is called, in which + // case it will return immediately with an error. + conn, err := l.Accept() + // This assumes that ANY error from Accept() will terminate the + // sub-listener. We could maybe be more precise, but it + // doesn't seem necessary. + terminate := err != nil + + select { + case ml.connCh <- connErrPair{conn: conn, err: err}: + case <-ml.stopCh: + // In case we accepted a connection AND were stopped, and + // this select-case was chosen, just throw away the + // connection. This avoids potentially blocking on connCh + // or leaking a connection. + if conn != nil { + _ = conn.Close() + } + terminate = true + } + // Make sure we don't loop on Accept() returning an error and + // the select choosing the channel case. 
+ if terminate { + return + } + } + }(l) + } + return ml, nil +} + +// Accept implements net.Listener. It waits for and returns a connection from +// any of the sub-listener. +func (ml *multiListener) Accept() (net.Conn, error) { + // wait for any sub-listener to enqueue an accepted connection + connErr, ok := <-ml.connCh + if !ok { + // The channel will be closed only when Close() is called on the + // multiListener. Closing of this channel implies that all + // sub-listeners are also closed, which causes a "use of closed + // network connection" error on their Accept() calls. We return the + // same error for multiListener.Accept() if multiListener.Close() + // has already been called. + return nil, fmt.Errorf("use of closed network connection") + } + return connErr.conn, connErr.err +} + +// Close implements net.Listener. It will close all sub-listeners and wait for +// the go-routines to exit. +func (ml *multiListener) Close() error { + // Make sure this can be called repeatedly without explosions. + select { + case <-ml.stopCh: + return fmt.Errorf("use of closed network connection") + default: + } + + // Tell all sub-listeners to stop. + close(ml.stopCh) + + // Closing the listeners causes Accept() to immediately return an error in + // the sub-listener go-routines. + for _, l := range ml.listeners { + _ = l.Close() + } + + // Wait for all the sub-listener go-routines to exit. + ml.wg.Wait() + close(ml.connCh) + + // Drain any already-queued connections. + for connErr := range ml.connCh { + if connErr.conn != nil { + _ = connErr.conn.Close() + } + } + return nil +} + +// Addr is an implementation of the net.Listener interface. It always returns +// the address of the first listener. Callers should use conn.LocalAddr() to +// obtain the actual local address of the sub-listener. +func (ml *multiListener) Addr() net.Addr { + return ml.listeners[0].Addr() +} + +// Addrs is like Addr, but returns the address for all registered listeners. 
+func (ml *multiListener) Addrs() []net.Addr { + var ret []net.Addr + for _, l := range ml.listeners { + ret = append(ret, l.Addr()) + } + return ret +} diff --git a/vendor/modules.txt b/vendor/modules.txt index b7f2636050d..e53cb423962 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -4,7 +4,7 @@ cloud.google.com/go/internal cloud.google.com/go/internal/optional cloud.google.com/go/internal/trace cloud.google.com/go/internal/version -# cloud.google.com/go/auth v0.9.3 +# cloud.google.com/go/auth v0.9.5 ## explicit; go 1.21 cloud.google.com/go/auth cloud.google.com/go/auth/credentials @@ -16,6 +16,7 @@ cloud.google.com/go/auth/credentials/internal/stsexchange cloud.google.com/go/auth/grpctransport cloud.google.com/go/auth/httptransport cloud.google.com/go/auth/internal +cloud.google.com/go/auth/internal/compute cloud.google.com/go/auth/internal/credsfile cloud.google.com/go/auth/internal/jwt cloud.google.com/go/auth/internal/transport @@ -23,8 +24,8 @@ cloud.google.com/go/auth/internal/transport/cert # cloud.google.com/go/auth/oauth2adapt v0.2.4 ## explicit; go 1.20 cloud.google.com/go/auth/oauth2adapt -# cloud.google.com/go/compute/metadata v0.5.0 -## explicit; go 1.20 +# cloud.google.com/go/compute/metadata v0.5.2 +## explicit; go 1.21 cloud.google.com/go/compute/metadata # cloud.google.com/go/iam v1.2.0 ## explicit; go 1.21 @@ -485,7 +486,7 @@ github.com/gogo/status # github.com/golang-jwt/jwt/v5 v5.2.1 ## explicit; go 1.18 github.com/golang-jwt/jwt/v5 -# github.com/golang/glog v1.2.1 +# github.com/golang/glog v1.2.2 ## explicit; go 1.19 github.com/golang/glog github.com/golang/glog/internal/logsink @@ -822,7 +823,13 @@ github.com/mattn/go-isatty # github.com/matttproud/golang_protobuf_extensions v1.0.4 ## explicit; go 1.9 github.com/matttproud/golang_protobuf_extensions/pbutil -# github.com/miekg/dns v1.1.61 +# github.com/mdlayher/socket v0.4.1 +## explicit; go 1.20 +github.com/mdlayher/socket +# github.com/mdlayher/vsock v1.2.1 +## explicit; go 1.20 +github.com/mdlayher/vsock +# github.com/miekg/dns v1.1.62 ## explicit; go 1.19 github.com/miekg/dns # github.com/minio/md5-simd v1.1.2 @@ -988,26 +995,27 @@ github.com/prometheus/client_golang/prometheus/testutil/promlint/validations # github.com/prometheus/client_model v0.6.1 ## explicit; go 1.19 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.59.1 -## explicit; go 1.20 +# github.com/prometheus/common v0.60.0 +## explicit; go 1.21 github.com/prometheus/common/config github.com/prometheus/common/expfmt github.com/prometheus/common/helpers/templates github.com/prometheus/common/model +github.com/prometheus/common/promslog github.com/prometheus/common/route github.com/prometheus/common/version # github.com/prometheus/common/sigv4 v0.1.0 ## explicit; go 1.15 github.com/prometheus/common/sigv4 -# github.com/prometheus/exporter-toolkit v0.11.0 -## explicit; go 1.18 +# github.com/prometheus/exporter-toolkit v0.13.0 +## explicit; go 1.22 github.com/prometheus/exporter-toolkit/web # github.com/prometheus/procfs v0.15.1 ## explicit; go 1.20 github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# github.com/prometheus/prometheus v1.99.0 => github.com/grafana/mimir-prometheus v0.0.0-20241104175756-dea6247a158f +# github.com/prometheus/prometheus v1.99.0 => github.com/grafana/mimir-prometheus v0.0.0-20241107161256-ab952f991472 ## explicit; go 1.22.0 github.com/prometheus/prometheus/config github.com/prometheus/prometheus/discovery @@ -1054,10 +1062,12 @@ 
github.com/prometheus/prometheus/tsdb/tsdbutil github.com/prometheus/prometheus/tsdb/wlog github.com/prometheus/prometheus/util/almost github.com/prometheus/prometheus/util/annotations +github.com/prometheus/prometheus/util/convertnhcb github.com/prometheus/prometheus/util/gate github.com/prometheus/prometheus/util/httputil github.com/prometheus/prometheus/util/jsonutil github.com/prometheus/prometheus/util/logging +github.com/prometheus/prometheus/util/notifications github.com/prometheus/prometheus/util/osutil github.com/prometheus/prometheus/util/pool github.com/prometheus/prometheus/util/stats @@ -1257,7 +1267,7 @@ go.opencensus.io/trace go.opencensus.io/trace/internal go.opencensus.io/trace/propagation go.opencensus.io/trace/tracestate -# go.opentelemetry.io/collector/pdata v1.15.0 +# go.opentelemetry.io/collector/pdata v1.16.0 ## explicit; go 1.22.0 go.opentelemetry.io/collector/pdata/internal go.opentelemetry.io/collector/pdata/internal/data @@ -1276,24 +1286,24 @@ go.opentelemetry.io/collector/pdata/internal/otlp go.opentelemetry.io/collector/pdata/pcommon go.opentelemetry.io/collector/pdata/pmetric go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp -# go.opentelemetry.io/collector/semconv v0.105.0 -## explicit; go 1.21.0 +# go.opentelemetry.io/collector/semconv v0.110.0 +## explicit; go 1.22.0 go.opentelemetry.io/collector/semconv/v1.6.1 # go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 ## explicit; go 1.21 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal -# go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.55.0 +# go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.56.0 ## explicit; go 1.22 go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/internal/semconvutil -# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.55.0 +# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 ## explicit; go 1.22 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil -# go.opentelemetry.io/otel v1.30.0 +# go.opentelemetry.io/otel v1.31.0 ## explicit; go 1.22 go.opentelemetry.io/otel go.opentelemetry.io/otel/attribute @@ -1309,16 +1319,14 @@ go.opentelemetry.io/otel/semconv/v1.18.0 go.opentelemetry.io/otel/semconv/v1.20.0 go.opentelemetry.io/otel/semconv/v1.21.0 go.opentelemetry.io/otel/semconv/v1.26.0 -# go.opentelemetry.io/otel/metric v1.30.0 +# go.opentelemetry.io/otel/metric v1.31.0 ## explicit; go 1.22 go.opentelemetry.io/otel/metric go.opentelemetry.io/otel/metric/embedded go.opentelemetry.io/otel/metric/noop -# go.opentelemetry.io/otel/sdk v1.30.0 -## explicit; go 1.22 # go.opentelemetry.io/otel/sdk/metric v1.30.0 ## explicit; go 1.22 -# go.opentelemetry.io/otel/trace v1.30.0 +# go.opentelemetry.io/otel/trace v1.31.0 ## explicit; go 1.22 go.opentelemetry.io/otel/trace go.opentelemetry.io/otel/trace/embedded @@ -1383,7 +1391,7 @@ golang.org/x/net/netutil golang.org/x/net/publicsuffix golang.org/x/net/trace golang.org/x/net/websocket -# golang.org/x/oauth2 v0.22.0 +# golang.org/x/oauth2 v0.23.0 ## explicit; go 
1.18 golang.org/x/oauth2 golang.org/x/oauth2/authhandler @@ -1446,7 +1454,7 @@ golang.org/x/tools/internal/stdlib golang.org/x/tools/internal/typeparams golang.org/x/tools/internal/typesinternal golang.org/x/tools/internal/versions -# google.golang.org/api v0.196.0 +# google.golang.org/api v0.199.0 ## explicit; go 1.21 google.golang.org/api/googleapi google.golang.org/api/googleapi/transport @@ -1468,7 +1476,7 @@ google.golang.org/api/transport/http/internal/propagation ## explicit; go 1.21 google.golang.org/genproto/googleapis/type/date google.golang.org/genproto/googleapis/type/expr -# google.golang.org/genproto/googleapis/api v0.0.0-20240827150818-7e3bb234dfed +# google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 ## explicit; go 1.21 google.golang.org/genproto/googleapis/api google.golang.org/genproto/googleapis/api/annotations @@ -1603,11 +1611,11 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 => github.com/colega/go-yaml-yaml v0.0.0-20220720105220-255a8d16d094 ## explicit gopkg.in/yaml.v3 -# k8s.io/apimachinery v0.29.3 -## explicit; go 1.21 +# k8s.io/apimachinery v0.31.1 +## explicit; go 1.22.0 k8s.io/apimachinery/pkg/util/runtime -# k8s.io/client-go v0.29.3 -## explicit; go 1.21 +# k8s.io/client-go v0.31.1 +## explicit; go 1.22.0 k8s.io/client-go/tools/metrics k8s.io/client-go/util/workqueue # k8s.io/klog/v2 v2.130.1 @@ -1628,7 +1636,7 @@ k8s.io/kube-openapi/pkg/validation/spec k8s.io/kube-openapi/pkg/validation/strfmt k8s.io/kube-openapi/pkg/validation/strfmt/bson k8s.io/kube-openapi/pkg/validation/validate -# k8s.io/utils v0.0.0-20230726121419-3b25d923346b +# k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 ## explicit; go 1.18 k8s.io/utils/clock k8s.io/utils/internal/third_party/forked/golang/net @@ -1671,7 +1679,7 @@ sigs.k8s.io/kustomize/kyaml/yaml/walk sigs.k8s.io/yaml sigs.k8s.io/yaml/goyaml.v2 sigs.k8s.io/yaml/goyaml.v3 -# github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v0.0.0-20241104175756-dea6247a158f +# github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v0.0.0-20241107161256-ab952f991472 # github.com/hashicorp/memberlist => github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe # gopkg.in/yaml.v3 => github.com/colega/go-yaml-yaml v0.0.0-20220720105220-255a8d16d094 # github.com/grafana/regexp => github.com/grafana/regexp v0.0.0-20240531075221-3685f1377d7b
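
Note on the vendored workqueue changes above: the k8s.io/client-go v0.31.1 bump replaces the interface{}-based workqueue API with generic Typed* equivalents and keeps the old names only as deprecated aliases, so existing callers keep compiling. The snippet below is a minimal consumer-side sketch of the typed API, not code from this diff; the string item type, the delays, and the example key are illustrative assumptions.

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	// Typed exponential-backoff limiter feeding a typed rate-limiting queue.
	// Item type (string) and delays are assumptions for illustration.
	limiter := workqueue.NewTypedItemExponentialFailureRateLimiter[string](5*time.Millisecond, time.Second)
	q := workqueue.NewTypedRateLimitingQueue[string](limiter)
	defer q.ShutDown()

	q.Add("object-key")

	item, shutdown := q.Get() // item is already a string, no type assertion needed
	if shutdown {
		return
	}
	fmt.Println("processing", item)
	q.Forget(item) // stop the rate limiter from tracking this item
	q.Done(item)   // mark processing as finished
}

The untyped constructors (NewRateLimitingQueue, NewItemExponentialFailureRateLimiter, and friends) continue to work through the [any] aliases introduced in this diff, so callers can migrate incrementally.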
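
The k8s.io/utils bump also vendors the new net/multi_listen.go shown above, whose MultiListen helper multiplexes several TCP listen addresses behind a single net.Listener. A hedged usage sketch follows; the addresses and the HTTP handler are purely illustrative and not taken from this diff.

package main

import (
	"context"
	"log"
	"net/http"

	netutils "k8s.io/utils/net"
)

func main() {
	// One combined listener accepting on both loopback addresses; an Accept
	// error from any sub-listener surfaces through the combined listener.
	ln, err := netutils.MultiListen(context.Background(), "tcp", "127.0.0.1:8080", "[::1]:8080")
	if err != nil {
		log.Fatal(err)
	}
	defer ln.Close()

	log.Fatal(http.Serve(ln, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		_, _ = w.Write([]byte("ok\n"))
	})))
}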